diff --git a/Makefile b/Makefile index 283c6236438e..6889ec6a091d 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 12 -SUBLEVEL = 0 +SUBLEVEL = 10 EXTRAVERSION = NAME = Fearless Coyote @@ -631,6 +631,9 @@ include arch/$(SRCARCH)/Makefile KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) +KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) +KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) +KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index 19ebddffb279..02fd1cece6ef 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end; #define ARC_REG_SLC_FLUSH 0x904 #define ARC_REG_SLC_INVALIDATE 0x905 #define ARC_REG_SLC_RGN_START 0x914 +#define ARC_REG_SLC_RGN_START1 0x915 #define ARC_REG_SLC_RGN_END 0x916 +#define ARC_REG_SLC_RGN_END1 0x917 /* Bit val in SLC_CONTROL */ #define SLC_CTRL_DIS 0x001 diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h index db7319e9b506..efb79fafff1d 100644 --- a/arch/arc/include/asm/mmu.h +++ b/arch/arc/include/asm/mmu.h @@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void) return IS_ENABLED(CONFIG_ARC_HAS_PAE40); } +extern int pae40_exist_but_not_enab(void); + #endif /* !__ASSEMBLY__ */ #endif diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index a867575a758b..7db283b46ebd 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) static DEFINE_SPINLOCK(lock); unsigned long flags; unsigned int ctrl; + phys_addr_t end; spin_lock_irqsave(&lock, flags); @@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op) * END needs to be setup before START (latter triggers the operation) * END can't be same as START, so add (l2_line_sz - 1) to sz */ - write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1)); - write_aux_reg(ARC_REG_SLC_RGN_START, paddr); + end = paddr + sz + l2_line_sz - 1; + if (is_pae40_enabled()) + write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end)); + + write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end)); + + if (is_pae40_enabled()) + write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr)); + + write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr)); + + /* Make sure "busy" bit reports correct status, see STAR 9001165532 */ + read_aux_reg(ARC_REG_SLC_CTRL); while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY); @@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void) __dc_enable(); } +/* + * Cache related boot time checks/setups only needed on master CPU: + * - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES) + * Assume SMP only, so all cores will have same cache config.
A check on + * one core suffices for all + * - IOC setup / dma callbacks only need to be done once + */ void __init arc_cache_init_master(void) { unsigned int __maybe_unused cpu = smp_processor_id(); @@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void) printk(arc_cache_mumbojumbo(0, str, sizeof(str))); - /* - * Only master CPU needs to execute rest of function: - * - Assume SMP so all cores will have same cache config so - * any geomtry checks will be same for all - * - IOC setup / dma callbacks only need to be setup once - */ if (!cpu) arc_cache_init_master(); + + /* + * In PAE regime, TLB and cache maintenance ops take wider addresses + * And even if PAE is not enabled in kernel, the upper 32-bits still need + * to be zeroed to keep the ops sane. + * As an optimization for more common !PAE enabled case, zero them out + * once at init, rather than checking/setting to 0 for every runtime op + */ + if (is_isa_arcv2() && pae40_exist_but_not_enab()) { + + if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) + write_aux_reg(ARC_REG_IC_PTAG_HI, 0); + + if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) + write_aux_reg(ARC_REG_DC_PTAG_HI, 0); + + if (l2_line_sz) { + write_aux_reg(ARC_REG_SLC_RGN_END1, 0); + write_aux_reg(ARC_REG_SLC_RGN_START1, 0); + } + } } diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c index d0126fdfe2d8..b181f3ee38aa 100644 --- a/arch/arc/mm/tlb.c +++ b/arch/arc/mm/tlb.c @@ -104,6 +104,8 @@ /* A copy of the ASID from the PID reg is kept in asid_cache */ DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE; +static int __read_mostly pae_exists; + /* * Utility Routine to erase a J-TLB entry * Caller needs to setup Index Reg (manually or via getIndex) @@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void) mmu->u_dtlb = mmu4->u_dtlb * 4; mmu->u_itlb = mmu4->u_itlb * 4; mmu->sasid = mmu4->sasid; - mmu->pae = mmu4->pae; + pae_exists = mmu->pae = mmu4->pae; } } @@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) return buf; } +int pae40_exist_but_not_enab(void) +{ + return pae_exists && !is_pae40_enabled(); +} + void arc_mmu_init(void) { char str[256]; @@ -859,6 +866,9 @@ void arc_mmu_init(void) /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); #endif + + if (pae40_exist_but_not_enab()) + write_aux_reg(ARC_REG_TLBPD1HI, 0); } /* diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts index 895fa6cfa15a..563901e0ec07 100644 --- a/arch/arm/boot/dts/armada-388-gp.dts +++ b/arch/arm/boot/dts/armada-388-gp.dts @@ -75,7 +75,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pca0_pins>; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -87,7 +87,7 @@ compatible = "nxp,pca9555"; pinctrl-names = "default"; interrupt-parent = <&gpio0>; - interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + interrupts = <18 IRQ_TYPE_LEVEL_LOW>; gpio-controller; #gpio-cells = <2>; interrupt-controller; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi index 559da17297ef..651299c242ec 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi @@ -507,7 +507,7 @@ pinctrl_pcie: pciegrp { fsl,pins = < /* PCIe reset */ - MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x030b0 + MX6QDL_PAD_EIM_DA0__GPIO3_IO00 0x030b0 MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x030b0 >; }; @@ -668,7 +668,7 @@ &pcie { pinctrl-names = 
"default"; pinctrl-0 = <&pinctrl_pcie>; - reset-gpio = <&gpio6 31 GPIO_ACTIVE_LOW>; + reset-gpio = <&gpio3 0 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/tango4-vantage-1172.dts b/arch/arm/boot/dts/tango4-vantage-1172.dts index 86d8df98802f..13bcc460bcb2 100644 --- a/arch/arm/boot/dts/tango4-vantage-1172.dts +++ b/arch/arm/boot/dts/tango4-vantage-1172.dts @@ -22,7 +22,7 @@ }; ð0 { - phy-connection-type = "rgmii"; + phy-connection-type = "rgmii-id"; phy-handle = <ð0_phy>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h index 4e6e88a6b2f4..2244a94ed9c9 100644 --- a/arch/arm/include/asm/bug.h +++ b/arch/arm/include/asm/bug.h @@ -37,7 +37,7 @@ do { \ ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ "2:\t.asciz " #__file "\n" \ ".popsection\n" \ - ".pushsection __bug_table,\"a\"\n" \ + ".pushsection __bug_table,\"aw\"\n" \ ".align 2\n" \ "3:\t.word 1b, 2b\n" \ "\t.hword " #__line ", 0\n" \ diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ffd8f12..f13ae153fb24 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* This is the base location for PIE (ET_DYN with INTERP) loads. */ +#define ELF_ET_DYN_BASE 0x400000UL /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. 
A value of 0 means we have no such handler. */ diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c index e62273aacb43..4ffbbd217e82 100644 --- a/arch/arm/mach-mvebu/platsmp.c +++ b/arch/arm/mach-mvebu/platsmp.c @@ -211,7 +211,7 @@ static int mv98dx3236_resume_set_cpu_boot_addr(int hw_cpu, void *boot_addr) return PTR_ERR(base); writel(0, base + MV98DX3236_CPU_RESUME_CTRL_REG); - writel(virt_to_phys(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG); + writel(__pa_symbol(boot_addr), base + MV98DX3236_CPU_RESUME_ADDR_REG); iounmap(base); diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 4d495ec39202..b69e4a4ecdd8 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -75,14 +75,10 @@ timer { compatible = "arm,armv8-timer"; - interrupts = <GIC_PPI 13 - (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>, - <GIC_PPI 14 - (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>, - <GIC_PPI 11 - (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>, - <GIC_PPI 10 - (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>; + interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>, + <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>, + <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>, + <GIC_PPI 10 IRQ_TYPE_LEVEL_HIGH>; }; soc { @@ -223,7 +219,7 @@ reg = <0x18800 0x100>, <0x18C00 0x20>; gpiosb: gpio { #gpio-cells = <2>; - gpio-ranges = <&pinctrl_sb 0 0 29>; + gpio-ranges = <&pinctrl_sb 0 0 30>; gpio-controller; interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h index 366448eb0fb7..a02a57186f56 100644 --- a/arch/arm64/include/asm/bug.h +++ b/arch/arm64/include/asm/bug.h @@ -36,7 +36,7 @@ #ifdef CONFIG_GENERIC_BUG #define __BUG_ENTRY(flags) \ - ".pushsection __bug_table,\"a\"\n\t" \ + ".pushsection __bug_table,\"aw\"\n\t" \ ".align 2\n\t" \ "0: .long 1f - 0b\n\t" \ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 5d1700425efe..3288c2b36731 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -113,12 +113,11 @@ #define ELF_EXEC_PAGESIZE PAGE_SIZE /* - * This is the location that an ET_DYN program is loaded if exec'ed. Typical - * use of this is to invoke "./ld.so someprog" to test out a new version of - * the loader. We need to make sure that it is out of the way of the program - * that it will "exec", and that there is sufficient room for the brk. + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is above 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ @@ -142,6 +141,7 @@ typedef struct user_fpsimd_state elf_fpregset_t; ({ \ clear_bit(TIF_32BIT, &current->mm->context.flags); \ clear_thread_flag(TIF_32BIT); \ + current->personality &= ~READ_IMPLIES_EXEC; \ }) /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ @@ -173,7 +173,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT -#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) +/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ +#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL /* AArch32 registers.
*/ #define COMPAT_ELF_NGREG 18 @@ -187,6 +188,11 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; ((x)->e_flags & EF_ARM_EABI_MASK)) #define compat_start_thread compat_start_thread +/* + * Unlike the native SET_PERSONALITY macro, the compat version inherits + * READ_IMPLIES_EXEC across a fork() since this is the behaviour on + * arch/arm/. + */ #define COMPAT_SET_PERSONALITY(ex) \ ({ \ set_bit(TIF_32BIT, &current->mm->context.flags); \ diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 06da8ea16bbe..c7b4995868e1 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -161,9 +161,11 @@ void fpsimd_flush_thread(void) { if (!system_supports_fpsimd()) return; + preempt_disable(); memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); fpsimd_flush_task_state(current); set_thread_flag(TIF_FOREIGN_FPSTATE); + preempt_enable(); } /* diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h index 8d9b1eba89c4..76b2e82ee730 100644 --- a/arch/blackfin/include/asm/bug.h +++ b/arch/blackfin/include/asm/bug.h @@ -21,7 +21,7 @@ #define _BUG_OR_WARN(flags) \ asm volatile( \ "1: .hword %0\n" \ - " .section __bug_table,\"a\",@progbits\n" \ + " .section __bug_table,\"aw\",@progbits\n" \ "2: .long 1b\n" \ " .long %1\n" \ " .short %2\n" \ @@ -38,7 +38,7 @@ #define _BUG_OR_WARN(flags) \ asm volatile( \ "1: .hword %0\n" \ - " .section __bug_table,\"a\",@progbits\n" \ + " .section __bug_table,\"aw\",@progbits\n" \ "2: .long 1b\n" \ " .short %1\n" \ " .org 2b + %2\n" \ diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index 1910223a9c02..cea2bb1621e6 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S @@ -147,23 +147,12 @@ * Find irq with highest priority */ # open coded PTR_LA t1, cpu_mask_nr_tbl -#if (_MIPS_SZPTR == 32) +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, cpu_mask_nr_tbl lui t1, %hi(cpu_mask_nr_tbl) addiu t1, %lo(cpu_mask_nr_tbl) - -#endif -#if (_MIPS_SZPTR == 64) - # open coded dla t1, cpu_mask_nr_tbl - .set push - .set noat - lui t1, %highest(cpu_mask_nr_tbl) - lui AT, %hi(cpu_mask_nr_tbl) - daddiu t1, t1, %higher(cpu_mask_nr_tbl) - daddiu AT, AT, %lo(cpu_mask_nr_tbl) - dsll t1, 32 - daddu t1, t1, AT - .set pop +#else +#error GCC `-msym32' option required for 64-bit DECstation builds #endif 1: lw t2,(t1) nop @@ -214,23 +203,12 @@ * Find irq with highest priority */ # open coded PTR_LA t1,asic_mask_nr_tbl -#if (_MIPS_SZPTR == 32) +#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) # open coded la t1, asic_mask_nr_tbl lui t1, %hi(asic_mask_nr_tbl) addiu t1, %lo(asic_mask_nr_tbl) - -#endif -#if (_MIPS_SZPTR == 64) - # open coded dla t1, asic_mask_nr_tbl - .set push - .set noat - lui t1, %highest(asic_mask_nr_tbl) - lui AT, %hi(asic_mask_nr_tbl) - daddiu t1, t1, %higher(asic_mask_nr_tbl) - daddiu AT, AT, %lo(asic_mask_nr_tbl) - dsll t1, 32 - daddu t1, t1, AT - .set pop +#else +#error GCC `-msym32' option required for 64-bit DECstation builds #endif 2: lw t2,(t1) nop diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index de781cf54bc7..da80878f2c0d 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs) return __microMIPS_compute_return_epc(regs); if (cpu_has_mips16) return __MIPS16e_compute_return_epc(regs); - return regs->cp0_epc; - } - - if (!delay_slot(regs)) { + } else if (!delay_slot(regs)) {
regs->cp0_epc += 4; return 0; } diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h index fc67947ed658..8b14c2706aa5 100644 --- a/arch/mips/include/asm/cache.h +++ b/arch/mips/include/asm/cache.h @@ -9,6 +9,8 @@ #ifndef _ASM_CACHE_H #define _ASM_CACHE_H +#include <kmalloc.h> + #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h index 9df1a53bcb36..b4e7dfa214eb 100644 --- a/arch/mips/include/asm/mach-ralink/ralink_regs.h +++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h @@ -13,6 +13,8 @@ #ifndef _RALINK_REGS_H_ #define _RALINK_REGS_H_ +#include <linux/io.h> + enum ralink_soc_type { RALINK_UNKNOWN = 0, RT2880_SOC, diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h index d045973ddb33..3ea84acf1814 100644 --- a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h +++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h @@ -33,6 +33,10 @@ #define CVMX_L2C_DBG (CVMX_ADD_IO_SEG(0x0001180080000030ull)) #define CVMX_L2C_CFG (CVMX_ADD_IO_SEG(0x0001180080000000ull)) #define CVMX_L2C_CTL (CVMX_ADD_IO_SEG(0x0001180080800000ull)) +#define CVMX_L2C_ERR_TDTX(block_id) \ + (CVMX_ADD_IO_SEG(0x0001180080A007E0ull) + ((block_id) & 3) * 0x40000ull) +#define CVMX_L2C_ERR_TTGX(block_id) \ + (CVMX_ADD_IO_SEG(0x0001180080A007E8ull) + ((block_id) & 3) * 0x40000ull) #define CVMX_L2C_LCKBASE (CVMX_ADD_IO_SEG(0x0001180080000058ull)) #define CVMX_L2C_LCKOFF (CVMX_ADD_IO_SEG(0x0001180080000060ull)) #define CVMX_L2C_PFCTL (CVMX_ADD_IO_SEG(0x0001180080000090ull)) @@ -66,9 +70,40 @@ ((offset) & 1) * 8) #define CVMX_L2C_WPAR_PPX(offset) (CVMX_ADD_IO_SEG(0x0001180080840000ull) + \ ((offset) & 31) * 8) -#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) +union cvmx_l2c_err_tdtx { + uint64_t u64; + struct cvmx_l2c_err_tdtx_s { + __BITFIELD_FIELD(uint64_t dbe:1, + __BITFIELD_FIELD(uint64_t sbe:1, + __BITFIELD_FIELD(uint64_t vdbe:1, + __BITFIELD_FIELD(uint64_t vsbe:1, + __BITFIELD_FIELD(uint64_t syn:10, + __BITFIELD_FIELD(uint64_t reserved_22_49:28, + __BITFIELD_FIELD(uint64_t wayidx:18, + __BITFIELD_FIELD(uint64_t reserved_2_3:2, + __BITFIELD_FIELD(uint64_t type:2, + ;))))))))) + } s; +}; + +union cvmx_l2c_err_ttgx { + uint64_t u64; + struct cvmx_l2c_err_ttgx_s { + __BITFIELD_FIELD(uint64_t dbe:1, + __BITFIELD_FIELD(uint64_t sbe:1, + __BITFIELD_FIELD(uint64_t noway:1, + __BITFIELD_FIELD(uint64_t reserved_56_60:5, + __BITFIELD_FIELD(uint64_t syn:6, + __BITFIELD_FIELD(uint64_t reserved_22_49:28, + __BITFIELD_FIELD(uint64_t wayidx:15, + __BITFIELD_FIELD(uint64_t reserved_2_6:5, + __BITFIELD_FIELD(uint64_t type:2, + ;))))))))) + } s; +}; + union cvmx_l2c_cfg { uint64_t u64; struct cvmx_l2c_cfg_s { diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h new file mode 100644 index 000000000000..a951ad5d65ad --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h @@ -0,0 +1,60 @@ +/***********************license start*************** + * Author: Cavium Networks + * + * Contact: support@xxxxxxxxxxxxxxxxxx + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Networks for more information + ***********************license end**************************************/ + +#ifndef __CVMX_L2D_DEFS_H__ +#define __CVMX_L2D_DEFS_H__ + +#define CVMX_L2D_ERR (CVMX_ADD_IO_SEG(0x0001180080000010ull)) +#define CVMX_L2D_FUS3 (CVMX_ADD_IO_SEG(0x00011800800007B8ull)) + + +union cvmx_l2d_err { + uint64_t u64; + struct cvmx_l2d_err_s { + __BITFIELD_FIELD(uint64_t reserved_6_63:58, + __BITFIELD_FIELD(uint64_t bmhclsel:1, + __BITFIELD_FIELD(uint64_t ded_err:1, + __BITFIELD_FIELD(uint64_t sec_err:1, + __BITFIELD_FIELD(uint64_t ded_intena:1, + __BITFIELD_FIELD(uint64_t sec_intena:1, + __BITFIELD_FIELD(uint64_t ecc_ena:1, + ;))))))) + } s; +}; + +union cvmx_l2d_fus3 { + uint64_t u64; + struct cvmx_l2d_fus3_s { + __BITFIELD_FIELD(uint64_t reserved_40_63:24, + __BITFIELD_FIELD(uint64_t ema_ctl:3, + __BITFIELD_FIELD(uint64_t reserved_34_36:3, + __BITFIELD_FIELD(uint64_t q3fus:34, + ;)))) + } s; +}; + +#endif diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 9742202f2a32..e638735cc3ac 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -62,6 +62,7 @@ enum cvmx_mips_space { #include <asm/octeon/cvmx-iob-defs.h> #include <asm/octeon/cvmx-ipd-defs.h> #include <asm/octeon/cvmx-l2c-defs.h> +#include <asm/octeon/cvmx-l2d-defs.h> #include <asm/octeon/cvmx-l2t-defs.h> #include <asm/octeon/cvmx-led-defs.h> #include <asm/octeon/cvmx-mio-defs.h> diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index f702a459a830..e53379c689b2 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) * * @regs: Pointer to pt_regs * @insn: branch instruction to decode - * @returns: -EFAULT on error and forces SIGBUS, and on success + * @returns: -EFAULT on error and forces SIGILL, and on success * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after * evaluating the branch. 
* @@ -431,7 +431,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* Fall through */ case jr_op: if (NO_R6EMU && insn.r_format.func == jr_op) - goto sigill_r6; + goto sigill_r2r6; regs->cp0_epc = regs->regs[insn.r_format.rs]; break; } @@ -446,7 +446,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, switch (insn.i_format.rt) { case bltzl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -459,7 +459,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezl_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -473,10 +473,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzal_op: case bltzall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bltzall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bltzall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a NAL @@ -507,10 +505,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezal_op: case bgezall_op: if (NO_R6EMU && (insn.i_format.rs || - insn.i_format.rt == bgezall_op)) { - ret = -SIGILL; - break; - } + insn.i_format.rt == bgezall_op)) + goto sigill_r2r6; regs->regs[31] = epc + 8; /* * OK we are here either because we hit a BAL @@ -556,6 +552,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* * These are unconditional and in j_format. */ + case jalx_op: case jal_op: regs->regs[31] = regs->cp0_epc + 8; case j_op: @@ -573,7 +570,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case beql_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { @@ -587,7 +584,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bnel_op: if (NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { @@ -601,7 +598,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case blezl_op: /* not really i_format */ if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case blez_op: /* * Compact branches for R6 for the @@ -636,7 +633,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgtzl_op: if (!insn.i_format.rt && NO_R6EMU) - goto sigill_r6; + goto sigill_r2r6; case bgtz_op: /* * Compact branches for R6 for the @@ -774,35 +771,27 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, #else case bc6_op: /* Only valid for MIPS R6 */ - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; regs->cp0_epc += 8; break; case balc6_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BALC */ regs->regs[31] = epc + 4; epc += 4 + (insn.i_format.simmediate << 2); regs->cp0_epc = epc; break; case pop66_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BEQZC || JIC */ regs->cp0_epc += 8; break; case pop76_op: - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* Compact branch: BNEZC || JIALC */ if (!insn.i_format.rs) { /* JIALC: set $31/ra */ @@ -814,10 +803,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case pop10_op: case pop30_op: /* Only 
valid for MIPS R6 */ - if (!cpu_has_mips_r6) { - ret = -SIGILL; - break; - } + if (!cpu_has_mips_r6) + goto sigill_r6; /* * Compact branches: * bovc, beqc, beqzalc, bnvc, bnec, bnezlac @@ -831,11 +818,17 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, return ret; sigill_dsp: - printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); - force_sig(SIGBUS, current); + pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); + return -EFAULT; +sigill_r2r6: + pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); return -EFAULT; sigill_r6: - pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", + pr_info("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n", current->comm); force_sig(SIGILL, current); return -EFAULT; diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 4eff2aed7360..4c01ee5b88c9 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) } seq_printf(m, "isa\t\t\t:"); - if (cpu_has_mips_r1) + if (cpu_has_mips_1) seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 6931fe722a0b..8c99ff1d8c3c 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -895,7 +895,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->regs[2]); + trace_sys_exit(regs, regs_return_value(regs)); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 80ed68b2c95e..27c2f90eeb21 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -371,7 +371,7 @@ EXPORT(sys_call_table) PTR sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 49765b44aa9b..65d5aeeb9bdb 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -311,7 +311,7 @@ EXPORT(sys_call_table) PTR sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_io_setup /* 5200 */ PTR sys_io_destroy PTR sys_io_getevents diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 90bad2d1b2d3..cbf190ef9e8a 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -302,7 +302,7 @@ EXPORT(sysn32_call_table) PTR compat_sys_sched_getaffinity PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR compat_sys_io_setup /* 6200 */ PTR sys_io_destroy PTR compat_sys_io_getevents diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 2dd70bd104e1..c30bc520885f 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -371,7 +371,7 @@ EXPORT(sys32_call_table) PTR compat_sys_writev PTR sys_cacheflush PTR sys_cachectl - PTR sys_sysmips + PTR __sys_sysmips PTR sys_ni_syscall /* 4150 */ PTR sys_getsid PTR sys_fdatasync diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dfa7f5796c7..863f958c126c 100644 --- a/arch/mips/kernel/syscall.c +++ 
b/arch/mips/kernel/syscall.c @@ -29,6 +29,7 @@ #include <linux/sched/task_stack.h> #include <asm/asm.h> +#include <asm/asm-eva.h> #include <asm/branch.h> #include <asm/cachectl.h> #include <asm/cacheflush.h> @@ -131,10 +132,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) __asm__ __volatile__ ( " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" - "1: ll %[old], (%[addr]) \n" + "1: \n" + user_ll("%[old]", "(%[addr])") " move %[tmp], %[new] \n" - "2: sc %[tmp], (%[addr]) \n" - " bnez %[tmp], 4f \n" + "2: \n" + user_sc("%[tmp]", "(%[addr])") + " beqz %[tmp], 4f \n" "3: \n" " .insn \n" " .subsection 2 \n" @@ -192,6 +195,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) unreachable(); } +/* + * mips_atomic_set() normally returns directly via syscall_exit potentially + * clobbering static registers, so be sure to preserve them. + */ +save_static_function(sys_sysmips); + SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2) { switch (cmd) { diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f12fde10c8ad..261915265942 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -2524,6 +2524,35 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, return 0; } +/* + * Emulate FPU instructions. + * + * If we use FPU hardware, then we have been typically called to handle + * an unimplemented operation, such as where an operand is a NaN or + * denormalized. In that case exit the emulation loop after a single + * iteration so as to let hardware execute any subsequent instructions. + * + * If we have no FPU hardware or it has been disabled, then continue + * emulating floating-point instructions until one of these conditions + * has occurred: + * + * - a non-FPU instruction has been encountered, + * + * - an attempt to emulate has ended with a signal, + * + * - the ISA mode has been switched. + * + * We need to terminate the emulation loop if we got switched to the + * MIPS16 mode, whether supported or not, so that we do not attempt + * to emulate a MIPS16 instruction as a regular MIPS FPU instruction. + * Similarly if we got switched to the microMIPS mode and only the + * regular MIPS mode is supported, so that we do not attempt to emulate + * a microMIPS instruction as a regular MIPS FPU instruction. Or if + * we got switched to the regular MIPS mode and only the microMIPS mode + * is supported, so that we do not attempt to emulate a regular MIPS + * instruction that should cause an Address Error exception instead. + * For simplicity we always terminate upon an ISA mode switch. + */ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, void *__user *fault_addr) { @@ -2609,6 +2638,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; if (sig) break; + /* + * We have to check for the ISA bit explicitly here, + * because `get_isa16_mode' may return 0 if support + * for code compression has been globally disabled, + * or otherwise we may produce the wrong signal or + * even proceed successfully where we must not. 
+ */ + if ((xcp->cp0_epc ^ prevepc) & 0x1) + break; cond_resched(); } while (xcp->cp0_epc > prevepc); diff --git a/arch/mn10300/include/asm/bug.h b/arch/mn10300/include/asm/bug.h index aa6a38886391..811414fb002d 100644 --- a/arch/mn10300/include/asm/bug.h +++ b/arch/mn10300/include/asm/bug.h @@ -21,7 +21,7 @@ do { \ asm volatile( \ " syscall 15 \n" \ "0: \n" \ - " .section __bug_table,\"a\" \n" \ + " .section __bug_table,\"aw\" \n" \ " .long 0b,%0,%1 \n" \ " .previous \n" \ : \ diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h index d2742273a685..07ea467f22fc 100644 --- a/arch/parisc/include/asm/bug.h +++ b/arch/parisc/include/asm/bug.h @@ -27,7 +27,7 @@ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"a\"\n" \ + "\t.pushsection __bug_table,\"aw\"\n" \ "2:\t" ASM_WORD_INSN "1b, %c0\n" \ "\t.short %c1, %c2\n" \ "\t.org 2b+%c3\n" \ @@ -50,7 +50,7 @@ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"a\"\n" \ + "\t.pushsection __bug_table,\"aw\"\n" \ "2:\t" ASM_WORD_INSN "1b, %c0\n" \ "\t.short %c1, %c2\n" \ "\t.org 2b+%c3\n" \ @@ -64,7 +64,7 @@ do { \ asm volatile("\n" \ "1:\t" PARISC_BUG_BREAK_ASM "\n" \ - "\t.pushsection __bug_table,\"a\"\n" \ + "\t.pushsection __bug_table,\"aw\"\n" \ "2:\t" ASM_WORD_INSN "1b\n" \ "\t.short %c0\n" \ "\t.org 2b+%c1\n" \ diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 5404c6a726b2..9a2a8956a695 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -20,6 +20,8 @@ ** flush/purge and allocate "regular" cacheable pages for everything. */ +#define DMA_ERROR_CODE (~(dma_addr_t)0) + #ifdef CONFIG_PA11 extern const struct dma_map_ops pcxl_dma_ops; extern const struct dma_map_ops pcx_dma_ops; @@ -54,12 +56,13 @@ parisc_walk_tree(struct device *dev) break; } } - BUG_ON(!dev->platform_data); return dev->platform_data; } - -#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu) - + +#define GET_IOC(dev) ({ \ + void *__pdata = parisc_walk_tree(dev); \ + __pdata ? 
HBA_DATA(__pdata)->iommu : NULL; \ +}) #ifdef CONFIG_IOMMU_CCIO struct parisc_device; diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 59be25764433..a81226257878 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context) mtctl(__space_to_prot(context), 8); } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) +static inline void switch_mm_irqs_off(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) { mtctl(__pa(next->pgd), 25); load_context(next->context); } } +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} +#define switch_mm_irqs_off switch_mm_irqs_off + #define deactivate_mm(tsk,mm) do { } while (0) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 88fe0aad4390..bc208136bbb2 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -34,7 +34,7 @@ struct thread_info { /* thread information allocation */ -#define THREAD_SIZE_ORDER 2 /* PA-RISC requires at least 16k stack */ +#define THREAD_SIZE_ORDER 3 /* PA-RISC requires at least 32k stack */ /* Be sure to hunt all references to this down when you change the size of * the kernel stack */ #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index c32a09095216..19c0c141bc3f 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -453,8 +453,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, before it can be accessed through the kernel mapping. */ preempt_disable(); flush_dcache_page_asm(__pa(vfrom), vaddr); - preempt_enable(); copy_page_asm(vto, vfrom); + preempt_enable(); } EXPORT_SYMBOL(copy_user_page); @@ -539,6 +539,10 @@ void flush_cache_mm(struct mm_struct *mm) struct vm_area_struct *vma; pgd_t *pgd; + /* Flush the TLB to avoid speculation if coherency is required. */ + if (parisc_requires_coherency()) + flush_tlb_all(); + /* Flushing the whole cache on each cpu takes forever on rp3440, etc. So, avoid it if the mm isn't too big. */ if (mm_total_size(mm) >= parisc_cache_flush_threshold) { @@ -577,33 +581,21 @@ void flush_cache_mm(struct mm_struct *mm) void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - unsigned long addr; - pgd_t *pgd; - BUG_ON(!vma->vm_mm->context); - if ((end - start) >= parisc_cache_flush_threshold) { - flush_cache_all(); - return; - } + /* Flush the TLB to avoid speculation if coherency is required. 
*/ + if (parisc_requires_coherency()) + flush_tlb_range(vma, start, end); - if (vma->vm_mm->context == mfsp(3)) { - flush_user_dcache_range_asm(start, end); - if (vma->vm_flags & VM_EXEC) - flush_user_icache_range_asm(start, end); + if ((end - start) >= parisc_cache_flush_threshold + || vma->vm_mm->context != mfsp(3)) { + flush_cache_all(); return; } - pgd = vma->vm_mm->pgd; - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - unsigned long pfn; - pte_t *ptep = get_ptep(pgd, addr); - if (!ptep) - continue; - pfn = pte_pfn(*ptep); - if (pfn_valid(pfn)) - __flush_cache_page(vma, addr, PFN_PHYS(pfn)); - } + flush_user_dcache_range_asm(start, end); + if (vma->vm_flags & VM_EXEC) + flush_user_icache_range_asm(start, end); } void @@ -612,7 +604,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long BUG_ON(!vma->vm_mm->context); if (pfn_valid(pfn)) { - flush_tlb_page(vma, vmaddr); + if (parisc_requires_coherency()) + flush_tlb_page(vma, vmaddr); __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } } diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index ba5e1c7b1f17..ef9a4eea662f 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -380,7 +380,7 @@ static inline int eirr_to_irq(unsigned long eirr) /* * IRQ STACK - used for irq handler */ -#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ +#define IRQ_STACK_SIZE (4096 << 3) /* 32k irq stack size */ union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index b64d7d21646e..a45a67d526f8 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -53,6 +53,7 @@ #include <linux/uaccess.h> #include <linux/rcupdate.h> #include <linux/random.h> +#include <linux/nmi.h> #include <asm/io.h> #include <asm/asm-offsets.h> @@ -145,6 +146,7 @@ void machine_power_off(void) /* prevent soft lockup/stalled CPU messages for endless loop. */ rcu_sysrq_start(); + lockup_detector_suspend(); for (;;); } diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 44aeaa9c039f..6308749359e4 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -361,7 +361,7 @@ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ - ENTRY_SAME(keyctl) + ENTRY_COMP(keyctl) ENTRY_SAME(ioprio_set) ENTRY_SAME(ioprio_get) ENTRY_SAME(inotify_init) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 32ec22146141..9fd95fec9717 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -367,7 +367,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ if (!vma || - address < vma->vm_start || address > vma->vm_end) { + address < vma->vm_start || address >= vma->vm_end) { si.si_signo = SIGSEGV; si.si_code = SEGV_MAPERR; break; diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h index 2b90335194a7..a2cc8010cd72 100644 --- a/arch/powerpc/include/asm/atomic.h +++ b/arch/powerpc/include/asm/atomic.h @@ -560,7 +560,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) * Atomically increments @v by 1, so long as @v is non-zero. * Returns non-zero if @v was non-zero, and zero otherwise. 
*/ -static __inline__ long atomic64_inc_not_zero(atomic64_t *v) +static __inline__ int atomic64_inc_not_zero(atomic64_t *v) { long t1, t2; @@ -579,7 +579,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) : "r" (&v->counter) : "cc", "xer", "memory"); - return t1; + return t1 != 0; } #endif /* __powerpc64__ */ diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 0151af6c2a50..87fcc1948817 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h @@ -18,7 +18,7 @@ #include <asm/asm-offsets.h> #ifdef CONFIG_DEBUG_BUGVERBOSE .macro EMIT_BUG_ENTRY addr,file,line,flags - .section __bug_table,"a" + .section __bug_table,"aw" 5001: PPC_LONG \addr, 5002f .short \line, \flags .org 5001b+BUG_ENTRY_SIZE @@ -29,7 +29,7 @@ .endm #else .macro EMIT_BUG_ENTRY addr,file,line,flags - .section __bug_table,"a" + .section __bug_table,"aw" 5001: PPC_LONG \addr .short \flags .org 5001b+BUG_ENTRY_SIZE @@ -42,14 +42,14 @@ sizeof(struct bug_entry), respectively */ #ifdef CONFIG_DEBUG_BUGVERBOSE #define _EMIT_BUG_ENTRY \ - ".section __bug_table,\"a\"\n" \ + ".section __bug_table,\"aw\"\n" \ "2:\t" PPC_LONG "1b, %0\n" \ "\t.short %1, %2\n" \ ".org 2b+%3\n" \ ".previous\n" #else #define _EMIT_BUG_ENTRY \ - ".section __bug_table,\"a\"\n" \ + ".section __bug_table,\"aw\"\n" \ "2:\t" PPC_LONG "1b\n" \ "\t.short %2\n" \ ".org 2b+%3\n" \ diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 09bde6e34f5d..548d9a411a0d 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -23,12 +23,13 @@ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0x20000000 +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ + 0x100000000UL) #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index da7e9432fa8f..db80b301c080 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -80,9 +80,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct task_struct *tsk) { /* Mark this context has been used on the new CPU */ - if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) + if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) { cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + /* + * This full barrier orders the store to the cpumask above vs + * a subsequent operation which allows this CPU to begin loading + * translations for next. + * + * When using the radix MMU that operation is the load of the + * MMU context id, which is then moved to SPRN_PID. + * + * For the hash MMU it is either the first load from slb_cache + * in switch_slb(), and/or the store of paca->mm_ctx_id in + * copy_mm_to_paca(). + * + * On the read side the barrier is in pte_xchg(), which orders + * the store to the PTE vs the load of mm_cpumask. 
+ */ + smp_mb(); + } + /* 32-bit keeps track of the current PGDIR in the thread struct */ #ifdef CONFIG_PPC32 tsk->thread.pgdir = next->pgd; diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h index 9c0f5db5cf46..67e7e3d990f4 100644 --- a/arch/powerpc/include/asm/pgtable-be-types.h +++ b/arch/powerpc/include/asm/pgtable-be-types.h @@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) unsigned long *p = (unsigned long *)ptep; __be64 prev; + /* See comment in switch_mm_irqs_off() */ prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old), (__force unsigned long)pte_raw(new)); diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h index 8bd3b13fe2fb..369a164b545c 100644 --- a/arch/powerpc/include/asm/pgtable-types.h +++ b/arch/powerpc/include/asm/pgtable-types.h @@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new) { unsigned long *p = (unsigned long *)ptep; + /* See comment in switch_mm_irqs_off() */ return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new)); } #endif diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 7e50e47375d6..a3b6575c7842 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1303,7 +1303,7 @@ static inline void msr_check_and_clear(unsigned long bits) " .llong 0\n" \ ".previous" \ : "=r" (rval) \ - : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \ + : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \ rval;}) #else #define mftb() ({unsigned long rval; \ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5c291df30fe3..40d8b552d15a 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -145,6 +145,19 @@ notrace unsigned int __check_irq_replay(void) /* Clear bit 0 which we wouldn't clear otherwise */ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS; + if (happened & PACA_IRQ_HARD_DIS) { + /* + * We may have missed a decrementer interrupt if hard disabled. + * Check the decrementer register in case we had a rollover + * while hard disabled. + */ + if (!(happened & PACA_IRQ_DEC)) { + if (decrementer_check_overflow()) { + local_paca->irq_happened |= PACA_IRQ_DEC; + happened |= PACA_IRQ_DEC; + } + } + } /* * Force the delivery of pending soft-disabled interrupts on PS3. @@ -170,7 +183,7 @@ notrace unsigned int __check_irq_replay(void) * in case we also had a rollover while hard disabled */ local_paca->irq_happened &= ~PACA_IRQ_DEC; - if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) + if (happened & PACA_IRQ_DEC) return 0x900; /* Finally check if an external interrupt happened */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index c119044cad0d..8ac0bd2bddb0 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -614,6 +614,18 @@ _GLOBAL(kexec_sequence) li r0,0 std r0,16(r1) +BEGIN_FTR_SECTION + /* + * This is the best time to turn AMR/IAMR off. + * key 0 is used in radix for supervisor<->user + * protection, but on hash key 0 is reserved + * ideally we want to enter with a clean state. + * NOTE, we rely on r0 being 0 from above. + */ + mtspr SPRN_IAMR,r0 + mtspr SPRN_AMOR,r0 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) + /* save regs for local vars on new stack. * yes, we won't go back, but ... 
*/ diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 2ad725ef4368..318738f3aa05 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -362,7 +362,8 @@ void enable_kernel_vsx(void) cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX); - if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) { + if (current->thread.regs && + (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) { check_if_tm_restore_required(current); /* * If a thread has already been reclaimed then the @@ -386,7 +387,7 @@ void flush_vsx_to_thread(struct task_struct *tsk) { if (tsk->thread.regs) { preempt_disable(); - if (tsk->thread.regs->msr & MSR_VSX) { + if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) { BUG_ON(tsk != current); giveup_vsx(tsk); } diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 925a4ef90559..660ed39e9c9a 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk) * If task is not current, it will have been flushed already to * it's thread_struct during __switch_to(). * - * A reclaim flushes ALL the state. + * A reclaim flushes ALL the state or if not in TM save TM SPRs + * in the appropriate thread structures from live. */ - if (tsk == current && MSR_TM_SUSPENDED(mfmsr())) - tm_reclaim_current(TM_CAUSE_SIGNAL); + if (tsk != current) + return; + if (MSR_TM_SUSPENDED(mfmsr())) { + tm_reclaim_current(TM_CAUSE_SIGNAL); + } else { + tm_enable(); + tm_save_sprs(&(tsk->thread)); + } } #else static inline void flush_tmregs_to_thread(struct task_struct *tsk) { } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 857129acf960..94a948207cd2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -335,6 +335,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) maj = ((pvr >> 8) & 0xFF) - 1; min = pvr & 0xFF; break; + case 0x004e: /* POWER9 bits 12-15 give chip type */ + maj = (pvr >> 8) & 0x0F; + min = pvr & 0xFF; + break; default: maj = (pvr >> 8) & 0xFF; min = pvr & 0xFF; diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 710e491206ed..1c10e26cebbb 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -164,8 +164,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) goto out; } - if (kvm->arch.hpt.virt) + if (kvm->arch.hpt.virt) { kvmppc_free_hpt(&kvm->arch.hpt); + kvmppc_rmap_reset(kvm); + } err = kvmppc_allocate_hpt(&info, order); if (err < 0) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8d1a365b8edc..1d3602f7ec22 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2938,6 +2938,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) run->fail_entry.hardware_entry_failure_reason = 0; return -EINVAL; } + /* Enable TM so we can read the TM SPRs */ + mtmsr(mfmsr() | MSR_TM); current->thread.tm_tfhar = mfspr(SPRN_TFHAR); current->thread.tm_tfiar = mfspr(SPRN_TFIAR); current->thread.tm_texasr = mfspr(SPRN_TEXASR); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index ffe1da95033a..08b200a0bbce 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -1257,8 +1257,8 @@ static void xive_pre_save_scan(struct kvmppc_xive *xive) if (!xc) continue; for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { - if 
(xc->queues[i].qpage) - xive_pre_save_queue(xive, &xc->queues[i]); + if (xc->queues[j].qpage) + xive_pre_save_queue(xive, &xc->queues[j]); } } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 33117f8a0882..ee33327686ae 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -683,8 +683,10 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, case 19: switch ((instr >> 1) & 0x3ff) { case 0: /* mcrf */ - rd = (instr >> 21) & 0x1c; - ra = (instr >> 16) & 0x1c; + rd = 7 - ((instr >> 23) & 0x7); + ra = 7 - ((instr >> 18) & 0x7); + rd *= 4; + ra *= 4; val = (regs->ccr >> ra) & 0xf; regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd); goto instr_done; @@ -964,6 +966,19 @@ int analyse_instr(struct instruction_op *op, struct pt_regs *regs, #endif case 19: /* mfcr */ + if ((instr >> 20) & 1) { + imm = 0xf0000000UL; + for (sh = 0; sh < 8; ++sh) { + if (instr & (0x80000 >> sh)) { + regs->gpr[rd] = regs->ccr & imm; + break; + } + imm >>= 4; + } + + goto instr_done; + } + regs->gpr[rd] = regs->ccr; regs->gpr[rd] &= 0xffffffffUL; goto instr_done; diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index a3edf813d455..51303f8af015 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c @@ -223,9 +223,15 @@ void destroy_context(struct mm_struct *mm) mm->context.cop_lockp = NULL; #endif /* CONFIG_PPC_ICSWX */ - if (radix_enabled()) - process_tb[mm->context.id].prtb1 = 0; - else + if (radix_enabled()) { + /* + * Radix doesn't have a valid bit in the process table + * entries. However we know that at least P9 implementation + * will avoid caching an entry with an invalid RTS field, + * and 0 is invalid. So this will do. + */ + process_tb[mm->context.id].prtb0 = 0; + } else subpage_prot_free(mm); destroy_pagetable_page(mm); __destroy_context(mm->context.id); diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index c28165d8970b..af3b552b5f8d 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -19,6 +19,7 @@ #include <asm/mmu.h> #include <asm/firmware.h> #include <asm/powernv.h> +#include <asm/sections.h> #include <trace/events/thp.h> @@ -121,7 +122,8 @@ static inline void __meminit print_mapping(unsigned long start, static int __meminit create_physical_mapping(unsigned long start, unsigned long end) { - unsigned long addr, mapping_size = 0; + unsigned long vaddr, addr, mapping_size = 0; + pgprot_t prot; start = _ALIGN_UP(start, PAGE_SIZE); for (addr = start; addr < end; addr += mapping_size) { @@ -145,8 +147,15 @@ static int __meminit create_physical_mapping(unsigned long start, start = addr; } - rc = radix__map_kernel_page((unsigned long)__va(addr), addr, - PAGE_KERNEL_X, mapping_size); + vaddr = (unsigned long)__va(addr); + + if (overlaps_kernel_text(vaddr, vaddr + mapping_size) || + overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) + prot = PAGE_KERNEL_X; + else + prot = PAGE_KERNEL; + + rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size); if (rc) return rc; } diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 8125160be7bc..3f3aa9a7063a 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -90,13 +90,15 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) * MMCRA[SDAR_MODE] will be set to 0b01 * For rest * MMCRA[SDAR_MODE] will be set from event code. + * If sdar_mode from event is zero, default to 0b01. 
Hardware + * requires that we set a non-zero value. */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE)) *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES; - else if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) + else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event)) *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; - else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + else *mmcra |= MMCRA_SDAR_MODE_TLB; } else *mmcra |= MMCRA_SDAR_MODE_TLB; diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 445f30a2c5ef..0c21747ed7e0 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -261,6 +261,7 @@ static u64 pnv_deepest_stop_psscr_val; static u64 pnv_deepest_stop_psscr_mask; static bool deepest_stop_found; +#ifdef CONFIG_HOTPLUG_CPU /* * pnv_cpu_offline: A function that puts the CPU into the deepest * available platform idle state on a CPU-Offline. @@ -293,6 +294,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu) return srr1; } +#endif /* * Power ISA 3.0 idle initialization. diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 6541d0b03e4c..495ba4e7336d 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -301,7 +301,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, int ssize, unsigned long inv_flags) { unsigned long lpar_rc; - unsigned long flags = (newpp & 7) | H_AVPN; + unsigned long flags; unsigned long want_v; want_v = hpte_encode_avpn(vpn, psize, ssize); @@ -309,6 +309,11 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot, pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", want_v, slot, flags, psize); + flags = (newpp & 7) | H_AVPN; + if (mmu_has_feature(MMU_FTR_KERNEL_RO)) + /* Move pp0 into bit 8 (IBM 55) */ + flags |= (newpp & HPTE_R_PP0) >> 55; + lpar_rc = plpar_pte_protect(flags, slot, want_v); if (lpar_rc == H_NOT_FOUND) { @@ -380,6 +385,10 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, BUG_ON(slot == -1); flags = newpp & 7; + if (mmu_has_feature(MMU_FTR_KERNEL_RO)) + /* Move pp0 into bit 8 (IBM 55) */ + flags |= (newpp & HPTE_R_PP0) >> 55; + lpar_rc = plpar_pte_protect(flags, slot, 0); BUG_ON(lpar_rc != H_SUCCESS); diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index e5bf1e84047f..011ef2180fe6 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c @@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np) of_detach_node(np); of_node_put(parent); - of_node_put(np); /* Must decrement the refcount */ return 0; } diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index 1bbd9dbfe4e0..ce9cc123988b 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -14,7 +14,7 @@ ".section .rodata.str,\"aMS\",@progbits,1\n" \ "2: .asciz \""__FILE__"\"\n" \ ".previous\n" \ - ".section __bug_table,\"a\"\n" \ + ".section __bug_table,\"aw\"\n" \ "3: .long 1b-3b,2b-3b\n" \ " .short %0,%1\n" \ " .org 3b+%2\n" \ @@ -30,7 +30,7 @@ asm volatile( \ "0: j 0b+2\n" \ "1:\n" \ - ".section __bug_table,\"a\"\n" \ + ".section __bug_table,\"aw\"\n" \ "2: .long 1b-2b\n" \ " .short %0\n" \ " .org 2b+%1\n" \ diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index e8f623041769..7c58d599f91b 100644 --- a/arch/s390/include/asm/elf.h +++ 
b/arch/s390/include/asm/elf.h @@ -161,14 +161,13 @@ extern unsigned int vdso_enabled; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. 64-bit - tasks are aligned to 4GB. */ -#define ELF_ET_DYN_BASE (is_compat_task() ? \ - (STACK_TOP / 3 * 2) : \ - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index 6ba0bf928909..6bc941be6921 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task, { unsigned long mask = -1UL; + /* + * No arguments for this syscall, there's nothing to do. + */ + if (!n) + return; + BUG_ON(i + n > 6); #ifdef CONFIG_COMPAT if (test_tsk_thread_flag(task, TIF_31BIT)) diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c index 926b5244263e..a2e5c24f47a7 100644 --- a/arch/s390/kvm/sthyi.c +++ b/arch/s390/kvm/sthyi.c @@ -394,7 +394,7 @@ static int sthyi(u64 vaddr) "srl %[cc],28\n" : [cc] "=d" (cc) : [code] "d" (code), [addr] "a" (addr) - : "memory", "cc"); + : "3", "memory", "cc"); return cc; } @@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu) VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr); trace_kvm_s390_handle_sthyi(vcpu, code, addr); - if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK) + if (reg1 == reg2 || reg1 & 1 || reg2 & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (code & 0xffff) { @@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu) goto out; } + if (addr & ~PAGE_MASK) + return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); + /* * If the page has not yet been faulted in, we want to do that * now and not after all the expensive calculations. 
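
Distilled from the handle_sthyi() hunk above, the check ordering it establishes looks like this as standalone C (a sketch only; inject_spec_exception(), set_cc3() and do_sthyi() are hypothetical stand-ins for the KVM plumbing):

	static int sthyi_validate_and_run(int reg1, int reg2, u64 code, u64 addr)
	{
		/* Malformed register pair: always a specification exception. */
		if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
			return inject_spec_exception();
		/* An unknown function code is reported via condition code 3 ... */
		if (code & 0xffff)
			return set_cc3();
		/* ... and only then is operand alignment checked, so a bad
		 * function code no longer raises a bogus specification
		 * exception for an unaligned buffer. */
		if (addr & ~PAGE_MASK)
			return inject_spec_exception();
		return do_sthyi(addr);	/* fault the page in, then compute */
	}
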
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6e97a2e3fd8d..8cea684f1f53 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1253,7 +1253,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) insn_count = bpf_jit_insn(jit, fp, i); if (insn_count < 0) return -1; - jit->addrs[i + 1] = jit->prg; /* Next instruction address */ + /* Next instruction address */ + jit->addrs[i + insn_count] = jit->prg; } bpf_jit_epilogue(jit); diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index 1b77f068be2b..986c8781d89f 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -24,14 +24,14 @@ */ #ifdef CONFIG_DEBUG_BUGVERBOSE #define _EMIT_BUG_ENTRY \ - "\t.pushsection __bug_table,\"a\"\n" \ + "\t.pushsection __bug_table,\"aw\"\n" \ "2:\t.long 1b, %O1\n" \ "\t.short %O2, %O3\n" \ "\t.org 2b+%O4\n" \ "\t.popsection\n" #else #define _EMIT_BUG_ENTRY \ - "\t.pushsection __bug_table,\"a\"\n" \ + "\t.pushsection __bug_table,\"aw\"\n" \ "2:\t.long 1b\n" \ "\t.short %O3\n" \ "\t.org 2b+%O4\n" \ diff --git a/arch/sparc/include/asm/asm-prototypes.h b/arch/sparc/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..d381e11c5dbb --- /dev/null +++ b/arch/sparc/include/asm/asm-prototypes.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. + */ + +#include <asm/xor.h> +#include <asm/checksum.h> +#include <asm/trap_block.h> +#include <asm/uaccess.h> +#include <asm/atomic.h> +#include <asm/ftrace.h> +#include <asm/cacheflush.h> +#include <asm/oplib.h> +#include <linux/atomic.h> + +void *__memscan_zero(void *, size_t); +void *__memscan_generic(void *, int, size_t); +void *__bzero(void *, size_t); +void VISenter(void); /* Dummy prototype to suppress warning */ +#undef memcpy +#undef memset +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); +typedef int TItype __attribute__((mode(TI))); +TItype __multi3(TItype a, TItype b); diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h index 2cddcda4f85f..87841d687f8d 100644 --- a/arch/sparc/include/asm/mmu_context_64.h +++ b/arch/sparc/include/asm/mmu_context_64.h @@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm); void __tsb_context_switch(unsigned long pgd_pa, struct tsb_config *tsb_base, struct tsb_config *tsb_huge, - unsigned long tsb_descr_pa); + unsigned long tsb_descr_pa, + unsigned long secondary_ctx); -static inline void tsb_context_switch(struct mm_struct *mm) +static inline void tsb_context_switch_ctx(struct mm_struct *mm, + unsigned long ctx) { __tsb_context_switch(__pa(mm->pgd), &mm->context.tsb_block[MM_TSB_BASE], @@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm) #else NULL #endif - , __pa(&mm->context.tsb_descr[MM_TSB_BASE])); + , __pa(&mm->context.tsb_descr[MM_TSB_BASE]), + ctx); } +#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0) + void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss); @@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str * cpu0 to update its TSB because at that point the cpu_vm_mask * only had cpu1 set in it.
*/ - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); /* Any time a processor runs a context on an address space * for the first time, we must flush that context out of the diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index ec9c04de3664..ff05992dae7a 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS]; void init_cur_cpu_trap(struct thread_info *); void setup_tba(void); extern int ncpus_probed; +extern u64 cpu_mondo_counter[NR_CPUS]; unsigned long real_hard_smp_processor_id(void); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 68bec7c97cb8..af6ac9c5d32e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -1241,8 +1241,6 @@ static int pci_sun4v_probe(struct platform_device *op) * ATU group, but ATU hcalls won't be available. */ hv_atu = false; - pr_err(PFX "Could not register hvapi ATU err=%d\n", - err); } else { pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", vatu_major, vatu_minor); diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index fdf31040a7dc..3218bc43302e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -622,22 +622,48 @@ static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt) } } -/* Multi-cpu list version. */ +#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid]) +#define MONDO_USEC_WAIT_MIN 2 +#define MONDO_USEC_WAIT_MAX 100 +#define MONDO_RETRY_LIMIT 500000 + +/* Multi-cpu list version. + * + * Deliver xcalls to 'cnt' cpus in 'cpu_list'. + * Sometimes not all cpus receive the mondo, requiring us to re-send + * it until all cpus have received it, or until some cpus are truly + * stuck unable to receive mondo and we time out. + * Occasionally a target cpu strand is borrowed briefly by the + * hypervisor to perform guest service, such as PCIe error handling. + * Given that service time, an overall wait of 1 second is reasonable + * for 1 cpu. + * Two in-between mondo check wait times are defined: 2 usec for a + * single-cpu quick turnaround and up to 100 usec for a large cpu + * count. + * Delivering a mondo to a large number of cpus can take longer, so we + * adjust the retry count as long as the target cpus are making + * forward progress. + */ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) { - int retries, this_cpu, prev_sent, i, saw_cpu_error; + int this_cpu, tot_cpus, prev_sent, i, rem; + int usec_wait, retries, tot_retries; + u16 first_cpu = 0xffff; + unsigned long xc_rcvd = 0; unsigned long status; + int ecpuerror_id = 0; + int enocpu_id = 0; u16 *cpu_list; + u16 cpu; this_cpu = smp_processor_id(); - cpu_list = __va(tb->cpu_list_pa); - - saw_cpu_error = 0; - retries = 0; + usec_wait = cnt * MONDO_USEC_WAIT_MIN; + if (usec_wait > MONDO_USEC_WAIT_MAX) + usec_wait = MONDO_USEC_WAIT_MAX; + retries = tot_retries = 0; + tot_cpus = cnt; prev_sent = 0; + do { - int forward_progress, n_sent; + int n_sent, mondo_delivered, target_cpu_busy; status = sun4v_cpu_mondo_send(cnt, tb->cpu_list_pa, @@ -645,94 +671,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt) /* HV_EOK means all cpus received the xcall, we're done.
*/ if (likely(status == HV_EOK)) - break; + goto xcall_done; + + /* If the error is not one of these non-fatal ones, panic */ + if (unlikely((status != HV_EWOULDBLOCK) && + (status != HV_ECPUERROR) && + (status != HV_ENOCPU))) + goto fatal_errors; /* First, see if we made any forward progress. + * + * Go through the cpu_list, count the target cpus that have + * received our mondo (n_sent), and those that did not (rem). + * Re-pack cpu_list with the cpus that remain to be retried at + * the front - this simplifies tracking the truly stalled cpus. * * The hypervisor indicates successful sends by setting * cpu list entries to the value 0xffff. + * + * EWOULDBLOCK means some target cpus did not receive the + * mondo, and retrying usually helps. + * + * ECPUERROR means at least one target cpu is in an error + * state; it's usually safe to skip the faulty cpu and retry. + * + * ENOCPU means one of the target cpus doesn't belong to the + * domain, perhaps because it was offlined, which is unexpected + * but not fatal; it's okay to skip the offlined cpu. */ + rem = 0; n_sent = 0; for (i = 0; i < cnt; i++) { - if (likely(cpu_list[i] == 0xffff)) + cpu = cpu_list[i]; + if (likely(cpu == 0xffff)) { n_sent++; + } else if ((status == HV_ECPUERROR) && + (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { + ecpuerror_id = cpu + 1; + } else if (status == HV_ENOCPU && !cpu_online(cpu)) { + enocpu_id = cpu + 1; + } else { + cpu_list[rem++] = cpu; + } } - forward_progress = 0; - if (n_sent > prev_sent) - forward_progress = 1; + /* No cpus remain; we're done. */ + if (rem == 0) + break; - prev_sent = n_sent; + /* Otherwise, update the cpu count for retry. */ + cnt = rem; - /* If we get a HV_ECPUERROR, then one or more of the cpus * in the list are in error state. Use the cpu_state() * hypervisor call to find out which cpus are in error state. + /* Record the overall number of mondos received by the + * first of the remaining cpus. */ - if (unlikely(status == HV_ECPUERROR)) { - for (i = 0; i < cnt; i++) { - long err; - u16 cpu; + if (first_cpu != cpu_list[0]) { + first_cpu = cpu_list[0]; + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); + } - cpu = cpu_list[i]; - if (cpu == 0xffff) - continue; + /* Was any mondo delivered successfully? */ + mondo_delivered = (n_sent > prev_sent); + prev_sent = n_sent; - err = sun4v_cpu_state(cpu); - if (err == HV_CPU_STATE_ERROR) { - saw_cpu_error = (cpu + 1); - cpu_list[i] = 0xffff; - } - } - } else if (unlikely(status != HV_EWOULDBLOCK)) - goto fatal_mondo_error; + /* Or was any target cpu busy processing other mondos? */ + target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu)); + xc_rcvd = CPU_MONDO_COUNTER(first_cpu); - /* Don't bother rewriting the CPU list, just leave the - * 0xffff and non-0xffff entries in there and the - * hypervisor will do the right thing. - * - * Only advance timeout state if we didn't make any - * forward progress. + /* The retry count tracks lack of progress. If we're making + * progress, reset the retry count. */ - if (unlikely(!forward_progress)) { - if (unlikely(++retries > 10000)) - goto fatal_mondo_timeout; - - /* Delay a little bit to let other cpus catch up - * on their cpu mondo queue work. - */ - udelay(2 * cnt); + if (likely(mondo_delivered || target_cpu_busy)) { + tot_retries += retries; + retries = 0; + } else if (unlikely(retries > MONDO_RETRY_LIMIT)) { + goto fatal_mondo_timeout; } - } while (1); - if (unlikely(saw_cpu_error)) - goto fatal_mondo_cpu_error; + /* Delay a little bit to let other cpus catch up on + * their cpu mondo queue work.
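
Condensed into standalone C, the retry policy above amounts to the following (sketch; mondo_should_retry() is a hypothetical helper, MONDO_RETRY_LIMIT as defined in this hunk):

	/* The retry budget only burns down on iterations with no progress at
	 * all: either a new delivery or mondo processing on the first
	 * stalled target resets it. */
	static int mondo_should_retry(int mondo_delivered, int target_cpu_busy,
				      int *retries)
	{
		if (mondo_delivered || target_cpu_busy) {
			*retries = 0;		/* progress: reset the budget */
			return 1;
		}
		return ++(*retries) <= MONDO_RETRY_LIMIT;
	}
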
+ */ + if (!mondo_delivered) + udelay(usec_wait); - return; + retries++; + } while (1); -fatal_mondo_cpu_error: - printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus " - "(including %d) were in error state\n", - this_cpu, saw_cpu_error - 1); +xcall_done: + if (unlikely(ecpuerror_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", + this_cpu, ecpuerror_id - 1); + } else if (unlikely(enocpu_id > 0)) { + pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", + this_cpu, enocpu_id - 1); + } return; +fatal_errors: + /* fatal errors include bad alignment, etc */ + pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", + this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); + panic("Unexpected SUN4V mondo error %lu\n", status); + fatal_mondo_timeout: - printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward " - " progress after %d retries.\n", - this_cpu, retries); - goto dump_cpu_list_and_out; - -fatal_mondo_error: - printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n", - this_cpu, status); - printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) " - "mondo_block_pa(%lx)\n", - this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa); - -dump_cpu_list_and_out: - printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu); - for (i = 0; i < cnt; i++) - printk("%u ", cpu_list[i]); - printk("]\n"); + /* some cpus being non-responsive to the cpu mondo */ + pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n", + this_cpu, first_cpu, (tot_retries + retries), tot_cpus); + panic("SUN4V mondo timeout panic\n"); } static void (*xcall_deliver_impl)(struct trap_per_cpu *, int); diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S index 559bc5e9c199..34631995859a 100644 --- a/arch/sparc/kernel/sun4v_ivec.S +++ b/arch/sparc/kernel/sun4v_ivec.S @@ -26,6 +26,21 @@ sun4v_cpu_mondo: ldxa [%g0] ASI_SCRATCHPAD, %g4 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 + /* Get smp_processor_id() into %g3 */ + sethi %hi(trap_block), %g5 + or %g5, %lo(trap_block), %g5 + sub %g4, %g5, %g3 + srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3 + + /* Increment cpu_mondo_counter[smp_processor_id()] */ + sethi %hi(cpu_mondo_counter), %g5 + or %g5, %lo(cpu_mondo_counter), %g5 + sllx %g3, 3, %g3 + add %g5, %g3, %g5 + ldx [%g5], %g3 + add %g3, 1, %g3 + stx %g3, [%g5] + /* Get CPU mondo queue base phys address into %g7. 
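
The counter increment added to sun4v_cpu_mondo above has to live in assembly (it runs at trap entry, before any C can be called), but what it computes is simply:

	/* Count every cpu-mondo trap taken on this cpu. The sender polls this
	 * through CPU_MONDO_COUNTER() to tell a slow target from a stuck one. */
	cpu_mondo_counter[smp_processor_id()]++;
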
*/ ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 196ee5eb4d48..ad31af1dd726 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2733,6 +2733,7 @@ void do_getpsr(struct pt_regs *regs) } } +u64 cpu_mondo_counter[NR_CPUS] = {0}; struct trap_per_cpu trap_block[NR_CPUS]; EXPORT_SYMBOL(trap_block); diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index 07c0df924960..db872dbfafe9 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -360,6 +360,7 @@ tsb_flush: * %o1: TSB base config pointer * %o2: TSB huge config pointer, or NULL if none * %o3: Hypervisor TSB descriptor physical address + * %o4: Secondary context to load, if non-zero * * We have to run this whole thing with interrupts * disabled so that the current cpu doesn't change @@ -372,6 +373,17 @@ __tsb_context_switch: rdpr %pstate, %g1 wrpr %g1, PSTATE_IE, %pstate + brz,pn %o4, 1f + mov SECONDARY_CONTEXT, %o5 + +661: stxa %o4, [%o5] ASI_DMMU + .section .sun4v_1insn_patch, "ax" + .word 661b + stxa %o4, [%o5] ASI_MMU + .previous + flush %g6 + +1: TRAP_LOAD_TRAP_BLOCK(%g2, %g3) stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR] diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S index 54f98706b03b..5a8cb37f0a3b 100644 --- a/arch/sparc/lib/U3memcpy.S +++ b/arch/sparc/lib/U3memcpy.S @@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08) ENTRY(U3_retl_o2_and_7_plus_GS) and %o2, 7, %o2 retl - add %o2, GLOBAL_SPARE, %o2 + add %o2, GLOBAL_SPARE, %o0 ENDPROC(U3_retl_o2_and_7_plus_GS) ENTRY(U3_retl_o2_and_7_plus_GS_plus_8) add GLOBAL_SPARE, 8, GLOBAL_SPARE and %o2, 7, %o2 retl - add %o2, GLOBAL_SPARE, %o2 + add %o2, GLOBAL_SPARE, %o0 ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8) #endif diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 1c6a1bde5138..ce17c3094ba6 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -62,19 +62,23 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENDPROC(atomic_fetch_##op); \ EXPORT_SYMBOL(atomic_fetch_##op); -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) +ATOMIC_OP(add) +ATOMIC_OP_RETURN(add) +ATOMIC_FETCH_OP(add) -ATOMIC_OPS(add) -ATOMIC_OPS(sub) +ATOMIC_OP(sub) +ATOMIC_OP_RETURN(sub) +ATOMIC_FETCH_OP(sub) -#undef ATOMIC_OPS -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) +ATOMIC_OP(and) +ATOMIC_FETCH_OP(and) -ATOMIC_OPS(and) -ATOMIC_OPS(or) -ATOMIC_OPS(xor) +ATOMIC_OP(or) +ATOMIC_FETCH_OP(or) + +ATOMIC_OP(xor) +ATOMIC_FETCH_OP(xor) -#undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -124,19 +128,23 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENDPROC(atomic64_fetch_##op); \ EXPORT_SYMBOL(atomic64_fetch_##op); -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) +ATOMIC64_OP(add) +ATOMIC64_OP_RETURN(add) +ATOMIC64_FETCH_OP(add) + +ATOMIC64_OP(sub) +ATOMIC64_OP_RETURN(sub) +ATOMIC64_FETCH_OP(sub) -ATOMIC64_OPS(add) -ATOMIC64_OPS(sub) +ATOMIC64_OP(and) +ATOMIC64_FETCH_OP(and) -#undef ATOMIC64_OPS -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) +ATOMIC64_OP(or) +ATOMIC64_FETCH_OP(or) -ATOMIC64_OPS(and) -ATOMIC64_OPS(or) -ATOMIC64_OPS(xor) +ATOMIC64_OP(xor) +ATOMIC64_FETCH_OP(xor) -#undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S index 
f6732174fe6b..6cfa521f444d 100644 --- a/arch/sparc/lib/checksum_64.S +++ b/arch/sparc/lib/checksum_64.S @@ -38,6 +38,7 @@ csum_partial_fix_alignment: .align 32 .globl csum_partial + .type csum_partial,#function EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index 0ecbafc30fd0..b1051e77c49a 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -65,6 +65,7 @@ add %o5, %o4, %o4 .globl FUNC_NAME + .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ LOAD(prefetch, %o0 + 0x000, #n_reads) diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S index daa96f4b03e6..5efee1f4be36 100644 --- a/arch/sparc/lib/memscan_64.S +++ b/arch/sparc/lib/memscan_64.S @@ -14,6 +14,8 @@ .text .align 32 .globl __memscan_zero, __memscan_generic + .type __memscan_zero,#function + .type __memscan_generic,#function .globl memscan EXPORT_SYMBOL(__memscan_zero) EXPORT_SYMBOL(__memscan_generic) diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index bb539b42b088..e23338dbfc43 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -63,6 +63,7 @@ __bzero_begin: .globl __bzero + .type __bzero,#function .globl memset EXPORT_SYMBOL(__bzero) EXPORT_SYMBOL(memset) diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index cd0e32bbcb1d..f80cfc64c55b 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -78,8 +78,8 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 0; refs = 0; - head = pmd_page(pmd); - page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + head = compound_head(page); do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3c40ebd50f92..fed73f14aa49 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -325,6 +325,29 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde } #ifdef CONFIG_HUGETLB_PAGE +static void __init add_huge_page_size(unsigned long size) +{ + unsigned int order; + + if (size_to_hstate(size)) + return; + + order = ilog2(size) - PAGE_SHIFT; + hugetlb_add_hstate(order); +} + +static int __init hugetlbpage_init(void) +{ + add_huge_page_size(1UL << HPAGE_64K_SHIFT); + add_huge_page_size(1UL << HPAGE_SHIFT); + add_huge_page_size(1UL << HPAGE_256MB_SHIFT); + add_huge_page_size(1UL << HPAGE_2GB_SHIFT); + + return 0; +} + +arch_initcall(hugetlbpage_init); + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -364,7 +387,7 @@ static int __init setup_hugepagesz(char *string) goto out; } - hugetlb_add_hstate(hugepage_shift - PAGE_SHIFT); + add_huge_page_size(hugepage_size); rc = 1; out: diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c index 17bd2e167e07..df707a8ad311 100644 --- a/arch/sparc/power/hibernate.c +++ b/arch/sparc/power/hibernate.c @@ -35,6 +35,5 @@ void restore_processor_state(void) { struct mm_struct *mm = current->active_mm; - load_secondary_context(mm); - tsb_context_switch(mm); + tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)); } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0efb4c9497bc..ae1d55548f5a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -94,6 +94,7 @@ config X86 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select 
GENERIC_TIME_VSYSCALL + select HARDLOCKUP_CHECK_TIMESTAMP if X86_64 select HAVE_ACPI_APEI if ACPI select HAVE_ACPI_APEI_NMI if ACPI select HAVE_ALIGNED_STRUCT_PAGE if SLUB diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S index 1cd792db15ef..1eab79c9ac48 100644 --- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S +++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S @@ -117,11 +117,10 @@ .set T1, REG_T1 .endm -#define K_BASE %r8 #define HASH_PTR %r9 +#define BLOCKS_CTR %r8 #define BUFFER_PTR %r10 #define BUFFER_PTR2 %r13 -#define BUFFER_END %r11 #define PRECALC_BUF %r14 #define WK_BUF %r15 @@ -205,14 +204,14 @@ * blended AVX2 and ALU instruction scheduling * 1 vector iteration per 8 rounds */ - vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP + vmovdqu (i * 2)(BUFFER_PTR), W_TMP .elseif ((i & 7) == 1) - vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\ + vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\ WY_TMP, WY_TMP .elseif ((i & 7) == 2) vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY .elseif ((i & 7) == 4) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP .elseif ((i & 7) == 7) vmovdqu WY_TMP, PRECALC_WK(i&~7) @@ -255,7 +254,7 @@ vpxor WY, WY_TMP, WY_TMP .elseif ((i & 7) == 7) vpxor WY_TMP2, WY_TMP, WY - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -291,7 +290,7 @@ vpsrld $30, WY, WY vpor WY, WY_TMP, WY .elseif ((i & 7) == 7) - vpaddd K_XMM(K_BASE), WY, WY_TMP + vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP vmovdqu WY_TMP, PRECALC_WK(i&~7) PRECALC_ROTATE_WY @@ -446,6 +445,16 @@ .endm +/* Add constant only if (%2 > %3) condition met (uses RTA as temp) + * %1 + %2 >= %3 ? %4 : 0 + */ +.macro ADD_IF_GE a, b, c, d + mov \a, RTA + add $\d, RTA + cmp $\c, \b + cmovge RTA, \a +.endm + /* * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining */ @@ -463,13 +472,16 @@ lea (2*4*80+32)(%rsp), WK_BUF # Precalc WK for first 2 blocks - PRECALC_OFFSET = 0 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64 .set i, 0 .rept 160 PRECALC i .set i, i + 1 .endr - PRECALC_OFFSET = 128 + + /* Go to next block if needed */ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128 + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 xchg WK_BUF, PRECALC_BUF .align 32 @@ -479,8 +491,8 @@ _loop: * we use K_BASE value as a signal of a last block, * it is set below by: cmovae BUFFER_PTR, K_BASE */ - cmp K_BASE, BUFFER_PTR - jne _begin + test BLOCKS_CTR, BLOCKS_CTR + jnz _begin .align 32 jmp _end .align 32 @@ -512,10 +524,10 @@ _loop0: .set j, j+2 .endr - add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */ - cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ - + /* Update Counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 /* * rounds * 60,62,64,66,68 @@ -532,8 +544,8 @@ _loop0: UPDATE_HASH 12(HASH_PTR), D UPDATE_HASH 16(HASH_PTR), E - cmp K_BASE, BUFFER_PTR /* is current block the last one? 
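
The ADD_IF_GE macro above is the core of the new block accounting; in C terms it performs a branchless conditional advance (sketch, with the macro's a/b/c/d operands spelled out):

	/* ADD_IF_GE a, b, c, d  ==  a = (b >= c) ? a + d : a
	 * e.g. ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128 steps the pointer past
	 * two 64-byte blocks, but only while at least 4 blocks remain. */
	if (blocks_ctr >= c)
		buffer_ptr += d;	/* emitted as cmovge: the loop stays branch-free */
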
*/ - je _loop + test BLOCKS_CTR, BLOCKS_CTR + jz _loop mov TB, B @@ -575,10 +587,10 @@ _loop2: .set j, j+2 .endr - add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */ - - cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */ - cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ + /* update counter */ + sub $1, BLOCKS_CTR + /* Move to the next block only if needed*/ + ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128 jmp _loop3 _loop3: @@ -641,19 +653,12 @@ _loop3: avx2_zeroupper - lea K_XMM_AR(%rip), K_BASE - + /* Setup initial values */ mov CTX, HASH_PTR mov BUF, BUFFER_PTR - lea 64(BUF), BUFFER_PTR2 - - shl $6, CNT /* mul by 64 */ - add BUF, CNT - add $64, CNT - mov CNT, BUFFER_END - cmp BUFFER_END, BUFFER_PTR2 - cmovae K_BASE, BUFFER_PTR2 + mov BUF, BUFFER_PTR2 + mov CNT, BLOCKS_CTR xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 4a4c0834f965..22f2281b942b 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1209,6 +1209,8 @@ ENTRY(nmi) * other IST entries. */ + ASM_CLAC + /* Use %rdx as our temp variable throughout */ pushq %rdx diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 580b60f5ac83..c138835c5547 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2105,7 +2105,7 @@ static void refresh_pce(void *ignored) load_mm_cr4(current->active_mm); } -static void x86_pmu_event_mapped(struct perf_event *event) +static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm) { if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; @@ -2120,22 +2120,20 @@ static void x86_pmu_event_mapped(struct perf_event *event) * For now, this can't happen because all callers hold mmap_sem * for write. If this changes, we'll need a different solution. 
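
The reason x86_pmu_event_mapped()/x86_pmu_event_unmapped() now take an explicit struct mm_struct * is visible just below: the final unmap can run from a context where current->mm is NULL or belongs to a different task. The refcount pattern, condensed:

	/* First mapper enables RDPMC (CR4.PCE) on every cpu running this mm;
	 * the last unmapper disables it again. current->mm must not be used:
	 * teardown may happen on another task's behalf. */
	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
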
*/ - lockdep_assert_held_exclusive(&current->mm->mmap_sem); + lockdep_assert_held_exclusive(&mm->mmap_sem); - if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1) - on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); + if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1) + on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1); } -static void x86_pmu_event_unmapped(struct perf_event *event) +static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm) { - if (!current->mm) - return; if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; - if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed)) - on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); + if (atomic_dec_and_test(&mm->context.perf_rdpmc_allowed)) + on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1); } static int x86_pmu_event_idx(struct perf_event *event) diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h index 39e702d90cdb..aa6b2023d8f8 100644 --- a/arch/x86/include/asm/bug.h +++ b/arch/x86/include/asm/bug.h @@ -35,7 +35,7 @@ #define _BUG_FLAGS(ins, flags) \ do { \ asm volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"a\"\n" \ + ".pushsection __bug_table,\"aw\"\n" \ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \ "\t.word %c1" "\t# bug_entry::line\n" \ @@ -52,7 +52,7 @@ do { \ #define _BUG_FLAGS(ins, flags) \ do { \ asm volatile("1:\t" ins "\n" \ - ".pushsection __bug_table,\"a\"\n" \ + ".pushsection __bug_table,\"aw\"\n" \ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \ "\t.word %c0" "\t# bug_entry::flags\n" \ "\t.org 2b+%c1\n" \ diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index e8ab9a46bc68..9aeb91935ce0 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -245,12 +245,13 @@ extern int force_personality32; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is above 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ + (TASK_SIZE / 3 * 2)) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports.
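
For a sense of where the new ELF_ET_DYN_BASE above places a 64-bit PIE (approximate figures, not taken from the patch): with the usual 47-bit TASK_SIZE, TASK_SIZE / 3 * 2 is roughly 0x555555554aaa, far above 4GB, while compat tasks keep the traditional low base:

	/* Sketch of the resulting bases (64-bit value approximate): */
	unsigned long pie_base = mmap_is_ia32()
			? 0x000400000UL		/* ia32: 4MB */
			: (TASK_SIZE / 3 * 2);	/* 64-bit: ~0x555555554aaa */
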
This could be done in user space, diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 255645f60ca2..554cdb205d17 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) return 0; } -static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate) +static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask) { if (use_xsave()) { - copy_kernel_to_xregs(&fpstate->xsave, -1); + copy_kernel_to_xregs(&fpstate->xsave, mask); } else { if (use_fxsr()) copy_kernel_to_fxregs(&fpstate->fxsave); @@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate) : : [addr] "m" (fpstate)); } - __copy_kernel_to_fpregs(fpstate); + __copy_kernel_to_fpregs(fpstate, -1); } extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 695605eb1dfb..ed8fdf86acfb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -486,6 +486,7 @@ struct kvm_vcpu_arch { unsigned long cr4; unsigned long cr4_guest_owned_bits; unsigned long cr8; + u32 pkru; u32 hflags; u64 efer; u64 apic_base; diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 68b329d77b3a..8463a136f711 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.execute_only_pkey = -1; } #endif - init_new_context_ldt(tsk, mm); - - return 0; + return init_new_context_ldt(tsk, mm); } static inline void destroy_context(struct mm_struct *mm) { diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 673f9ac50f6d..dbf266b0d14a 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -420,6 +420,8 @@ #define MSR_IA32_TSC_ADJUST 0x0000003b #define MSR_IA32_BNDCFGS 0x00000d90 +#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc + #define MSR_IA32_XSS 0x00000da0 #define FEATURE_CONTROL_LOCKED (1<<0) diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h index 0b1ff4c1c14e..fffb2794dd89 100644 --- a/arch/x86/include/asm/pat.h +++ b/arch/x86/include/asm/pat.h @@ -7,6 +7,7 @@ bool pat_enabled(void); void pat_disable(const char *reason); extern void pat_init(void); +extern void init_cache_modes(void); extern int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm); diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index f6d20f6cca12..32b74a8fa6b7 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -43,6 +43,7 @@ #include <asm/page.h> #include <asm/pgtable.h> +#include <asm/smap.h> #include <xen/interface/xen.h> #include <xen/interface/sched.h> @@ -214,10 +215,12 @@ privcmd_call(unsigned call, __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); + stac(); asm volatile("call *%[call]" : __HYPERCALL_5PARAM : [call] "a" (&hypercall_page[call]) : __HYPERCALL_CLOBBER5); + clac(); return (long)__res; } @@ -476,7 +479,11 @@ static inline int HYPERVISOR_dm_op( domid_t dom, unsigned int nr_bufs, void *bufs) { - return _hypercall3(int, dm_op, dom, nr_bufs, bufs); + int ret; + stac(); + ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs); + clac(); + return ret; } static inline void diff --git 
a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6bb680671088..7491e73d9253 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -346,6 +346,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, int pin; struct mpc_intsrc mp_irq; + /* + * Check bus_irq boundary. + */ + if (bus_irq >= NR_IRQS_LEGACY) { + pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq); + return; + } + /* * Convert 'gsi' to 'ioapic.pin'. */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 347bb9f65737..6d779095ed01 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2115,7 +2115,7 @@ static inline void __init check_timer(void) int idx; idx = find_irq_entry(apic1, pin1, mp_INT); if (idx != -1 && irq_trigger(idx)) - unmask_ioapic_irq(irq_get_chip_data(0)); + unmask_ioapic_irq(irq_get_irq_data(0)); } irq_domain_deactivate_irq(irq_data); irq_domain_activate_irq(irq_data); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 43e10d6fdbed..44adcde7a0ca 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token) if (hlist_unhashed(&n.link)) break; + rcu_irq_exit(); + if (!n.halted) { local_irq_enable(); schedule(); @@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token) /* * We cannot reschedule. So halt. */ - rcu_irq_exit(); native_safe_halt(); local_irq_disable(); - rcu_irq_enter(); } + + rcu_irq_enter(); } if (!n.halted) finish_swait(&n.wq, &wait); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f81823695014..36646f19d40b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1075,6 +1075,13 @@ void __init setup_arch(char **cmdline_p) max_possible_pfn = max_pfn; + /* + * This call is required when the CPU does not support PAT. If + * mtrr_bp_init() invoked it already via pat_init() the call has no + * effect. + */ + init_cache_modes(); + /* * Define random base addresses for memory sections after max_pfn is * defined and before each memory section base is used. diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 59ca2eea522c..19adbb418443 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; cpuid_mask(&entry->ecx, CPUID_7_ECX); /* PKU is not yet implemented for shadow paging. 
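
Looking back at the kvm_async_pf_task_wait() hunk above: the point of moving rcu_irq_exit()/rcu_irq_enter() is that RCU's irq accounting must be left before either way of sleeping and re-entered on every wakeup, not just around the halt path. Condensed shape (the two predicates are hypothetical stand-ins):

	for (;;) {
		if (apf_wake_received())	/* hypothetical: wait is over */
			break;
		rcu_irq_exit();			/* before sleeping, either way */
		if (can_reschedule())		/* hypothetical predicate */
			schedule();
		else
			native_safe_halt();	/* an interrupt wakes us */
		rcu_irq_enter();		/* rebalanced on every wakeup */
	}
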
*/ - if (!tdp_enabled) + if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) entry->ecx &= ~F(PKU); entry->edx &= kvm_cpuid_7_0_edx_x86_features; entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX); diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index a6fd40aade7c..da6728383052 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) return best && (best->ebx & bit(X86_FEATURE_RTM)); } +static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 7, 0); + return best && (best->ebx & bit(X86_FEATURE_MPX)); +} + static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h index 762cdf2595f9..e1e89ee4af75 100644 --- a/arch/x86/kvm/kvm_cache_regs.h +++ b/arch/x86/kvm/kvm_cache_regs.h @@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu) | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32); } -static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu) -{ - return kvm_x86_ops->get_pkru(vcpu); -} - static inline void enter_guest_mode(struct kvm_vcpu *vcpu) { vcpu->arch.hflags |= HF_GUEST_MASK; diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 330bf3a811fb..b0d36a229d2e 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -182,7 +182,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, * index of the protection domain, so pte_pkey * 2 is * the index of the first bit for the domain. */ - pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3; + pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK.
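
A worked example of the indexing in the comment above: PKRU holds two bits per protection key - access-disable first, then write-disable - so key N occupies bits 2N and 2N+1.

	/* Protection key 5 -> PKRU bits 10 (AD) and 11 (WD): */
	u32 pkey      = 5;
	u32 pkru_bits = (vcpu->arch.pkru >> (pkey * 2)) & 3;
	/* bit 0 of pkru_bits = access-disable, bit 1 = write-disable */
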
*/ offset = (pfec & ~1) + diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ba9891ac5c56..58dbca7f2106 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1725,11 +1725,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) to_svm(vcpu)->vmcb->save.rflags = rflags; } -static u32 svm_get_pkru(struct kvm_vcpu *vcpu) -{ - return 0; -} - static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) { switch (reg) { @@ -5313,8 +5308,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .get_rflags = svm_get_rflags, .set_rflags = svm_set_rflags, - .get_pkru = svm_get_pkru, - .tlb_flush = svm_flush_tlb, .run = svm_vcpu_run, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ca5d2b93385c..2461e1a53f8c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -636,8 +636,6 @@ struct vcpu_vmx { u64 current_tsc_ratio; - bool guest_pkru_valid; - u32 guest_pkru; u32 host_pkru; /* @@ -2368,11 +2366,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) vmcs_writel(GUEST_RFLAGS, rflags); } -static u32 vmx_get_pkru(struct kvm_vcpu *vcpu) -{ - return to_vmx(vcpu)->guest_pkru; -} - static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) { u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); @@ -3195,7 +3188,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); break; case MSR_IA32_BNDCFGS: - if (!kvm_mpx_supported()) + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) return 1; msr_info->data = vmcs_read64(GUEST_BNDCFGS); break; @@ -3277,7 +3271,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) vmcs_writel(GUEST_SYSENTER_ESP, data); break; case MSR_IA32_BNDCFGS: - if (!kvm_mpx_supported()) + if (!kvm_mpx_supported() || + (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu))) + return 1; + if (is_noncanonical_address(data & PAGE_MASK) || + (data & MSR_IA32_BNDCFGS_RSVD)) return 1; vmcs_write64(GUEST_BNDCFGS, data); break; @@ -6547,7 +6545,6 @@ static __init int hardware_setup(void) vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); - vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true); memcpy(vmx_msr_bitmap_legacy_x2apic_apicv, vmx_msr_bitmap_legacy, PAGE_SIZE); @@ -8856,8 +8853,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) vmx_set_interrupt_shadow(vcpu, 0); - if (vmx->guest_pkru_valid) - __write_pkru(vmx->guest_pkru); + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) && + vcpu->arch.pkru != vmx->host_pkru) + __write_pkru(vcpu->arch.pkru); atomic_switch_perf_msrs(vmx); debugctlmsr = get_debugctlmsr(); @@ -9005,13 +9004,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) * back on host, so it is safe to read guest PKRU from current * XSAVE. 
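
Taken together, the vmx_vcpu_run() hunk above (entry side) and the one just below (exit side) implement the following swap, sketched in one place (WRPKRU is not free, hence the inequality checks):

	bool guest_pku = static_cpu_has(X86_FEATURE_PKU) &&
			 kvm_read_cr4_bits(vcpu, X86_CR4_PKE);

	if (guest_pku && vcpu->arch.pkru != vmx->host_pkru)
		__write_pkru(vcpu->arch.pkru);		/* before VM entry */
	/* ... guest executes ... */
	if (guest_pku) {
		vcpu->arch.pkru = __read_pkru();	/* after VM exit */
		if (vcpu->arch.pkru != vmx->host_pkru)
			__write_pkru(vmx->host_pkru);
	}
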
*/ - if (boot_cpu_has(X86_FEATURE_OSPKE)) { - vmx->guest_pkru = __read_pkru(); - if (vmx->guest_pkru != vmx->host_pkru) { - vmx->guest_pkru_valid = true; + if (static_cpu_has(X86_FEATURE_PKU) && + kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) { + vcpu->arch.pkru = __read_pkru(); + if (vcpu->arch.pkru != vmx->host_pkru) __write_pkru(vmx->host_pkru); - } else - vmx->guest_pkru_valid = false; } /* @@ -11503,8 +11500,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .get_rflags = vmx_get_rflags, .set_rflags = vmx_set_rflags, - .get_pkru = vmx_get_pkru, - .tlb_flush = vmx_flush_tlb, .run = vmx_vcpu_run, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0e846f0cb83b..786e47fc6092 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3236,7 +3236,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) u32 size, offset, ecx, edx; cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); - memcpy(dest + offset, src, size); + if (feature == XFEATURE_MASK_PKRU) + memcpy(dest + offset, &vcpu->arch.pkru, + sizeof(vcpu->arch.pkru)); + else + memcpy(dest + offset, src, size); + } valid -= feature; @@ -3274,7 +3279,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) u32 size, offset, ecx, edx; cpuid_count(XSTATE_CPUID, index, &size, &offset, &ecx, &edx); - memcpy(dest, src + offset, size); + if (feature == XFEATURE_MASK_PKRU) + memcpy(&vcpu->arch.pkru, src + offset, + sizeof(vcpu->arch.pkru)); + else + memcpy(dest, src + offset, size); } valid -= feature; @@ -7616,7 +7625,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) */ vcpu->guest_fpu_loaded = 1; __kernel_fpu_begin(); - __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state); + /* PKRU is separately restored in kvm_x86_ops->run. */ + __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, + ~XFEATURE_MASK_PKRU); trace_kvm_fpu(1); } diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index c5959576c315..020f75cc8cf6 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -37,7 +37,7 @@ ENTRY(copy_user_generic_unrolled) movl %edx,%ecx andl $63,%edx shrl $6,%ecx - jz 17f + jz .L_copy_short_string 1: movq (%rsi),%r8 2: movq 1*8(%rsi),%r9 3: movq 2*8(%rsi),%r10 @@ -58,7 +58,8 @@ ENTRY(copy_user_generic_unrolled) leaq 64(%rdi),%rdi decl %ecx jnz 1b -17: movl %edx,%ecx +.L_copy_short_string: + movl %edx,%ecx andl $7,%edx shrl $3,%ecx jz 20f @@ -174,6 +175,8 @@ EXPORT_SYMBOL(copy_user_generic_string) */ ENTRY(copy_user_enhanced_fast_string) ASM_STAC + cmpl $64,%edx + jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */ movl %edx,%ecx 1: rep movsb diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 19ad095b41df..81db3e92dc76 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -50,8 +50,7 @@ unsigned long tasksize_64bit(void) static unsigned long stack_maxrandom_size(unsigned long task_size) { unsigned long max = 0; - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) { + if (current->flags & PF_RANDOMIZE) { max = (-1UL) & __STACK_RND_MASK(task_size == tasksize_32bit()); max <<= PAGE_SHIFT; } @@ -82,13 +81,13 @@ static int mmap_is_legacy(void) static unsigned long arch_rnd(unsigned int rndbits) { + if (!(current->flags & PF_RANDOMIZE)) + return 0; return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT; } unsigned long arch_mmap_rnd(void) { - if (!(current->flags & PF_RANDOMIZE)) - return 0; return arch_rnd(mmap_is_ia32() ? 
mmap32_rnd_bits : mmap64_rnd_bits); } diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index 9b78685b66e6..45979502f64b 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c @@ -37,14 +37,14 @@ #undef pr_fmt #define pr_fmt(fmt) "" fmt -static bool boot_cpu_done; - -static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT); -static void init_cache_modes(void); +static bool __read_mostly boot_cpu_done; +static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); +static bool __read_mostly pat_initialized; +static bool __read_mostly init_cm_done; void pat_disable(const char *reason) { - if (!__pat_enabled) + if (pat_disabled) return; if (boot_cpu_done) { @@ -52,10 +52,8 @@ void pat_disable(const char *reason) return; } - __pat_enabled = 0; + pat_disabled = true; pr_info("x86/PAT: %s\n", reason); - - init_cache_modes(); } static int __init nopat(char *str) @@ -67,7 +65,7 @@ early_param("nopat", nopat); bool pat_enabled(void) { - return !!__pat_enabled; + return pat_initialized; } EXPORT_SYMBOL_GPL(pat_enabled); @@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat) update_cache_mode_entry(i, cache); } pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); + + init_cm_done = true; } #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8)) @@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat) } wrmsrl(MSR_IA32_CR_PAT, pat); + pat_initialized = true; __init_cache_modes(pat); } @@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat) wrmsrl(MSR_IA32_CR_PAT, pat); } -static void init_cache_modes(void) +void init_cache_modes(void) { u64 pat = 0; - static int init_cm_done; if (init_cm_done) return; @@ -287,8 +287,6 @@ static void init_cache_modes(void) } __init_cache_modes(pat); - - init_cm_done = 1; } /** @@ -306,10 +304,8 @@ void pat_init(void) u64 pat; struct cpuinfo_x86 *c = &boot_cpu_data; - if (!pat_enabled()) { - init_cache_modes(); + if (pat_disabled) return; - } if ((c->x86_vendor == X86_VENDOR_INTEL) && (((c->x86 == 0x6) && (c->x86_model <= 0xd)) || diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 6d52b94f4bb9..20fa7c84109d 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -571,3 +571,35 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); + +/* + * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff] + * + * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to + * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used + * for soft poweroff and suspend-to-RAM. + * + * As far as we know, this is related to the address space, not to the Root + * Port itself. Attaching the quirk to the Root Port is a convenience, but + * it could probably also be a standalone DMI quirk. 
+ * + * https://bugzilla.kernel.org/show_bug.cgi?id=103211 + */ +static void quirk_apple_mbp_poweroff(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + + if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") && + !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) || + pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0)) + return; + + res = request_mem_region(0x7fa00000, 0x200000, + "MacBook Pro poweroff workaround"); + if (res) + dev_info(dev, "claimed %s %pR\n", res->name, res); + else + dev_info(dev, "can't work around MacBook Pro poweroff issue\n"); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff); diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index aae32535f4ec..3e71246da5fe 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -19,6 +19,7 @@ #include <linux/irq_work.h> #include <linux/tick.h> #include <linux/nmi.h> +#include <linux/cpuhotplug.h> #include <asm/paravirt.h> #include <asm/desc.h> @@ -417,7 +418,7 @@ static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */ */ tick_nohz_idle_enter(); - cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); + cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE); } #else /* !CONFIG_HOTPLUG_CPU */ diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index d159e9b9c018..672391003e40 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c @@ -94,13 +94,11 @@ unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) } EXPORT_SYMBOL(__sync_fetch_and_or_4); -#ifdef CONFIG_NET /* * Networking support */ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); -#endif /* CONFIG_NET */ /* * Architecture-specific symbols diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 1a804a2f9a5b..3c75c4e597da 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -103,6 +103,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr) clear_page_alias(kvaddr, paddr); preempt_enable(); } +EXPORT_SYMBOL(clear_user_highpage); void copy_user_highpage(struct page *dst, struct page *src, unsigned long vaddr, struct vm_area_struct *vma) @@ -119,10 +120,7 @@ void copy_user_highpage(struct page *dst, struct page *src, copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); preempt_enable(); } - -#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ - -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +EXPORT_SYMBOL(copy_user_highpage); /* * Any time the kernel writes to a user page cache page, or it is about to @@ -176,7 +174,7 @@ void flush_dcache_page(struct page *page) /* There shouldn't be an entry in the cache for this page anymore. */ } - +EXPORT_SYMBOL(flush_dcache_page); /* * For now, flush the whole cache. FIXME?? @@ -188,6 +186,7 @@ void local_flush_cache_range(struct vm_area_struct *vma, __flush_invalidate_dcache_all(); __invalidate_icache_all(); } +EXPORT_SYMBOL(local_flush_cache_range); /* * Remove any entry in the cache for this page. 
@@ -207,8 +206,9 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address, __flush_invalidate_dcache_page_alias(virt, phys); __invalidate_icache_page_alias(virt, phys); } +EXPORT_SYMBOL(local_flush_cache_page); -#endif +#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ void update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) @@ -225,7 +225,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) flush_tlb_page(vma, addr); -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +#if (DCACHE_WAY_SIZE > PAGE_SIZE) if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { unsigned long phys = page_to_phys(page); @@ -256,7 +256,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) * flush_dcache_page() on the page. */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK +#if (DCACHE_WAY_SIZE > PAGE_SIZE) void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, diff --git a/block/blk-core.c b/block/blk-core.c index a7421b772d0e..56a7fac71439 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3307,6 +3307,10 @@ EXPORT_SYMBOL(blk_finish_plug); */ void blk_pm_runtime_init(struct request_queue *q, struct device *dev) { + /* not support for RQF_PM and ->rpm_status in blk-mq yet */ + if (q->mq_ops) + return; + q->dev = dev; q->rpm_status = RPM_ACTIVE; pm_runtime_set_autosuspend_delay(q->dev, -1); diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 8e61e8640e17..5eaecd40f701 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -35,7 +35,6 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set) { unsigned int *map = set->mq_map; unsigned int nr_queues = set->nr_hw_queues; - const struct cpumask *online_mask = cpu_online_mask; unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; @@ -44,7 +43,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set) cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; - for_each_cpu(i, online_mask) { + for_each_present_cpu(i) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) @@ -54,7 +53,7 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set) queue = 0; for_each_possible_cpu(i) { - if (!cpumask_test_cpu(i, online_mask)) { + if (!cpumask_test_cpu(i, cpu_present_mask)) { map[i] = 0; continue; } diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c index 0c3354cf3552..76944e3271bf 100644 --- a/block/blk-mq-pci.c +++ b/block/blk-mq-pci.c @@ -36,12 +36,18 @@ int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev) for (queue = 0; queue < set->nr_hw_queues; queue++) { mask = pci_irq_get_affinity(pdev, queue); if (!mask) - return -EINVAL; + goto fallback; for_each_cpu(cpu, mask) set->mq_map[cpu] = queue; } return 0; + +fallback: + WARN_ON_ONCE(set->nr_hw_queues > 1); + for_each_possible_cpu(cpu) + set->mq_map[cpu] = 0; + return 0; } EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues); diff --git a/block/blk-mq.c b/block/blk-mq.c index 958cedaff8b8..2414e0cd3a02 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -37,9 +37,6 @@ #include "blk-wbt.h" #include "blk-mq-sched.h" -static DEFINE_MUTEX(all_q_mutex); -static LIST_HEAD(all_q_list); - static void blk_mq_poll_stats_start(struct request_queue *q); static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb); static void __blk_mq_stop_hw_queues(struct request_queue *q, bool sync); @@ -623,8 +620,8 @@ EXPORT_SYMBOL(blk_mq_kick_requeue_list); 
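
The switch to the present mask in blk_mq_map_queues() above is what makes the all_q_list/all_q_mutex machinery (whose removal begins here) unnecessary: every present CPU is given a ctx-to-hctx mapping up front, so a CPU coming online later finds its software queue already wired up. Condensed (queue_for_cpu() is a hypothetical helper):

	/* Map by presence, not onlineness; offline-but-present cpus still get
	 * a real mapping, so hotplug needs no freeze-and-remap cycle. */
	for_each_possible_cpu(i)
		map[i] = cpumask_test_cpu(i, cpu_present_mask)
				? queue_for_cpu(set, i)
				: 0;
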
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs) { - kblockd_schedule_delayed_work(&q->requeue_work, - msecs_to_jiffies(msecs)); + kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, + msecs_to_jiffies(msecs)); } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); @@ -1975,8 +1972,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, INIT_LIST_HEAD(&__ctx->rq_list); __ctx->queue = q; - /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpu_online(i)) + /* If the cpu isn't present, the cpu is mapped to first hctx */ + if (!cpu_present(i)) continue; hctx = blk_mq_map_queue(q, i); @@ -2019,8 +2016,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, } } -static void blk_mq_map_swqueue(struct request_queue *q, - const struct cpumask *online_mask) +static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int i, hctx_idx; struct blk_mq_hw_ctx *hctx; @@ -2038,13 +2034,11 @@ static void blk_mq_map_swqueue(struct request_queue *q, } /* - * Map software to hardware queues + * Map software to hardware queues. + * + * If the cpu isn't present, the cpu is mapped to first hctx. */ - for_each_possible_cpu(i) { - /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpumask_test_cpu(i, online_mask)) - continue; - + for_each_present_cpu(i) { hctx_idx = q->mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && @@ -2340,16 +2334,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, blk_queue_softirq_done(q, set->ops->complete); blk_mq_init_cpu_queues(q, set->nr_hw_queues); - - get_online_cpus(); - mutex_lock(&all_q_mutex); - - list_add_tail(&q->all_q_node, &all_q_list); blk_mq_add_queue_tag_set(set, q); - blk_mq_map_swqueue(q, cpu_online_mask); - - mutex_unlock(&all_q_mutex); - put_online_cpus(); + blk_mq_map_swqueue(q); if (!(set->flags & BLK_MQ_F_NO_SCHED)) { int ret; @@ -2375,18 +2361,12 @@ void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; - mutex_lock(&all_q_mutex); - list_del_init(&q->all_q_node); - mutex_unlock(&all_q_mutex); - blk_mq_del_queue_tag_set(q); - blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); } /* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q, - const struct cpumask *online_mask) +static void blk_mq_queue_reinit(struct request_queue *q) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); @@ -2399,76 +2379,12 @@ static void blk_mq_queue_reinit(struct request_queue *q, * involves free and re-allocate memory, worthy doing?) */ - blk_mq_map_swqueue(q, online_mask); + blk_mq_map_swqueue(q); blk_mq_sysfs_register(q); blk_mq_debugfs_register_hctxs(q); } -/* - * New online cpumask which is going to be set in this hotplug event. - * Declare this cpumasks as global as cpu-hotplug operation is invoked - * one-by-one and dynamically allocating this could result in a failure. - */ -static struct cpumask cpuhp_online_new; - -static void blk_mq_queue_reinit_work(void) -{ - struct request_queue *q; - - mutex_lock(&all_q_mutex); - /* - * We need to freeze and reinit all existing queues. Freezing - * involves synchronous wait for an RCU grace period and doing it - * one by one may take a long time. Start freezing all queues in - * one swoop and then wait for the completions so that freezing can - * take place in parallel. 
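
The comment above is being deleted along with its code, but the idiom it describes is still worth keeping: start every freeze before waiting on any of them, so the RCU grace periods overlap instead of running back-to-back. As a standalone sketch:

	/* Freeze N queues in roughly one grace period instead of N: */
	list_for_each_entry(q, &queues, node)
		blk_freeze_queue_start(q);	/* asynchronous: begin them all */
	list_for_each_entry(q, &queues, node)
		blk_mq_freeze_queue_wait(q);	/* then wait for each to drain */
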
- */ - list_for_each_entry(q, &all_q_list, all_q_node) - blk_freeze_queue_start(q); - list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_freeze_queue_wait(q); - - list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_queue_reinit(q, &cpuhp_online_new); - - list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_unfreeze_queue(q); - - mutex_unlock(&all_q_mutex); -} - -static int blk_mq_queue_reinit_dead(unsigned int cpu) -{ - cpumask_copy(&cpuhp_online_new, cpu_online_mask); - blk_mq_queue_reinit_work(); - return 0; -} - -/* - * Before hotadded cpu starts handling requests, new mappings must be - * established. Otherwise, these requests in hw queue might never be - * dispatched. - * - * For example, there is a single hw queue (hctx) and two CPU queues (ctx0 - * for CPU0, and ctx1 for CPU1). - * - * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list - * and set bit0 in pending bitmap as ctx1->index_hw is still zero. - * - * And then while running hw queue, blk_mq_flush_busy_ctxs() finds bit0 is set - * in pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list. - * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list is - * ignored. - */ -static int blk_mq_queue_reinit_prepare(unsigned int cpu) -{ - cpumask_copy(&cpuhp_online_new, cpu_online_mask); - cpumask_set_cpu(cpu, &cpuhp_online_new); - blk_mq_queue_reinit_work(); - return 0; -} - static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; @@ -2679,7 +2595,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, blk_mq_update_queue_map(set); list_for_each_entry(q, &set->tag_list, tag_set_list) { blk_mq_realloc_hw_ctxs(set, q); - blk_mq_queue_reinit(q, cpu_online_mask); + blk_mq_queue_reinit(q); } list_for_each_entry(q, &set->tag_list, tag_set_list) @@ -2895,24 +2811,10 @@ bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie) } EXPORT_SYMBOL_GPL(blk_mq_poll); -void blk_mq_disable_hotplug(void) -{ - mutex_lock(&all_q_mutex); -} - -void blk_mq_enable_hotplug(void) -{ - mutex_unlock(&all_q_mutex); -} - static int __init blk_mq_init(void) { cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL, blk_mq_hctx_notify_dead); - - cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare", - blk_mq_queue_reinit_prepare, - blk_mq_queue_reinit_dead); return 0; } subsys_initcall(blk_mq_init); diff --git a/block/blk-mq.h b/block/blk-mq.h index cc67b48e3551..558df56544d2 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -56,11 +56,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, bool at_head); void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, struct list_head *list); -/* - * CPU hotplug helpers - */ -void blk_mq_enable_hotplug(void); -void blk_mq_disable_hotplug(void); /* * CPU -> queue mappings diff --git a/crypto/authencesn.c b/crypto/authencesn.c index 6f8f6b86bfe2..0cf5fefdb859 100644 --- a/crypto/authencesn.c +++ b/crypto/authencesn.c @@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, u8 *ihash = ohash + crypto_ahash_digestsize(auth); u32 tmp[2]; + if (!authsize) + goto decrypt; + /* Move high-order bits of sequence number back. 
*/ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0); scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0); @@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req, if (crypto_memneq(ihash, ohash, authsize)) return -EBADMSG; +decrypt: + sg_init_table(areq_ctx->dst, 2); dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen); diff --git a/crypto/drbg.c b/crypto/drbg.c index cdb27ac4b226..633a88e93ab0 100644 --- a/crypto/drbg.c +++ b/crypto/drbg.c @@ -1691,6 +1691,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg) return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; + init_completion(&drbg->ctr_completion); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index 8baab4307f7b..7830d304dff6 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -496,7 +496,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err) goto done; pos++; - if (memcmp(out_buf + pos, digest_info->data, digest_info->size)) + if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size)) goto done; pos += digest_info->size; diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index fc6c416f8724..d5999eb41c00 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, { "CAV900D", APD_ADDR(vulcan_spi_desc) }, - { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, - { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, + { "HISI02A1", APD_ADDR(hip07_i2c_desc) }, + { "HISI02A2", APD_ADDR(hip08_i2c_desc) }, #endif { } }; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 10347e3d73ad..5bd58bd4ab05 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = { }; struct lpss_private_data { + struct acpi_device *adev; void __iomem *mmio_base; resource_size_t mmio_size; unsigned int fixed_clk_rate; @@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = { static void byt_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + if (!acpi_dev_present("INT33FD", NULL, -1)) pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); } @@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = { static void bsw_pwm_setup(struct lpss_private_data *pdata) { + struct acpi_device *adev = pdata->adev; + + /* Only call pwm_add_table for the first PWM controller */ + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup)); } @@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, goto err_out; } + pdata->adev = adev; pdata->dev_desc = dev_desc; if (dev_desc->setup) diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index c24235d8fb52..d8b2779b0140 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -147,7 +147,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8; module_param(ec_storm_threshold, uint, 0644); MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); -static bool ec_freeze_events __read_mostly = true; +static bool ec_freeze_events __read_mostly = false; module_param(ec_freeze_events, 
bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); @@ -1703,7 +1703,7 @@ int __init acpi_ec_dsdt_probe(void) * functioning ECDT EC first in order to handle the events. * https://bugzilla.kernel.org/show_bug.cgi?id=115021 */ -int __init acpi_ec_ecdt_start(void) +static int __init acpi_ec_ecdt_start(void) { acpi_handle handle; @@ -1812,24 +1812,6 @@ int __init acpi_ec_ecdt_probe(void) } #ifdef CONFIG_PM_SLEEP -static int acpi_ec_suspend_noirq(struct device *dev) -{ - struct acpi_ec *ec = - acpi_driver_data(to_acpi_device(dev)); - - acpi_ec_enter_noirq(ec); - return 0; -} - -static int acpi_ec_resume_noirq(struct device *dev) -{ - struct acpi_ec *ec = - acpi_driver_data(to_acpi_device(dev)); - - acpi_ec_leave_noirq(ec); - return 0; -} - static int acpi_ec_suspend(struct device *dev) { struct acpi_ec *ec = @@ -1851,7 +1833,6 @@ static int acpi_ec_resume(struct device *dev) #endif static const struct dev_pm_ops acpi_ec_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; @@ -1925,20 +1906,17 @@ static inline void acpi_ec_query_exit(void) int __init acpi_ec_init(void) { int result; + int ecdt_fail, dsdt_fail; /* register workqueue for _Qxx evaluations */ result = acpi_ec_query_init(); if (result) - goto err_exit; - /* Now register the driver for the EC */ - result = acpi_bus_register_driver(&acpi_ec_driver); - if (result) - goto err_exit; + return result; -err_exit: - if (result) - acpi_ec_query_exit(); - return result; + /* Drivers must be started after acpi_ec_query_init() */ + ecdt_fail = acpi_ec_ecdt_start(); + dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver); + return ecdt_fail && dsdt_fail ? -ENODEV : 0; } /* EC driver currently not unloadable */ diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 66229ffa909b..7e66f3c72b81 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data); int acpi_ec_init(void); int acpi_ec_ecdt_probe(void); int acpi_ec_dsdt_probe(void); -int acpi_ec_ecdt_start(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 656acb5d7166..f3c3e9d4563c 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -3043,6 +3043,8 @@ static struct acpi_driver acpi_nfit_driver = { static __init int nfit_init(void) { + int ret; + BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); @@ -3070,8 +3072,14 @@ static __init int nfit_init(void) return -ENOMEM; nfit_mce_register(); + ret = acpi_bus_register_driver(&acpi_nfit_driver); + if (ret) { + nfit_mce_unregister(); + destroy_workqueue(nfit_wq); + } + + return ret; - return acpi_bus_register_driver(&acpi_nfit_driver); } static __exit void nfit_exit(void) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 9364398204e9..6822ac9f106b 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1046,7 +1046,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value( fwnode_for_each_child_node(fwnode, child) { u32 nr; - if (!fwnode_property_read_u32(fwnode, prop_name, &nr)) + if (fwnode_property_read_u32(child, prop_name, &nr)) continue; if (val == nr) diff --git 
a/drivers/acpi/scan.c b/drivers/acpi/scan.c index d53162997f32..359d16c30002 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2085,7 +2085,6 @@ int __init acpi_scan_init(void) acpi_gpe_apply_masked_gpes(); acpi_update_all_gpes(); - acpi_ec_ecdt_start(); acpi_scan_initialized = true; diff --git a/drivers/android/binder.c b/drivers/android/binder.c index aae4d8d4be36..831cdd7d197d 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc, list_add_tail(&t->work.entry, target_list); tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); - if (target_wait) - wake_up_interruptible(target_wait); + if (target_wait) { + if (reply || !(t->flags & TF_ONE_WAY)) + wake_up_interruptible_sync(target_wait); + else + wake_up_interruptible(target_wait); + } return; err_translate_failed: @@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ - if (unlikely(current->mm != proc->vma_vm_mm)) { - pr_err("current mm mismatch proc mm\n"); - return -EINVAL; - } trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); @@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) const char *failure_string; struct binder_buffer *buffer; - if (proc->tsk != current) + if (proc->tsk != current->group_leader) return -EINVAL; if ((vma->vm_end - vma->vm_start) > SZ_4M) @@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; - get_task_struct(current); - proc->tsk = current; - proc->vma_vm_mm = current->mm; + get_task_struct(current->group_leader); + proc->tsk = current->group_leader; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 49ba9834c715..12d59968020f 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3028,10 +3028,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { if (!sata_pmp_attached(ap)) { - if (likely(devno < ata_link_max_devices(&ap->link))) + if (likely(devno >= 0 && + devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { - if (likely(devno < ap->nr_pmp_links)) + if (likely(devno >= 0 && + devno < ap->nr_pmp_links)) return &ap->pmp_link[devno].device[0]; } diff --git a/drivers/base/core.c b/drivers/base/core.c index bbecaf9293be..d3228cb7d12f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2667,7 +2667,11 @@ void device_shutdown(void) pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); - if (dev->bus && dev->bus->shutdown) { + if (dev->class && dev->class->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->class->shutdown(dev); + } else if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index ac350c518e0c..31c0586d9b13 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -30,7 +30,6 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> 
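[The driver-core hunk above (drivers/base/core.c) makes device_shutdown() prefer a class-level ->shutdown() callback over the bus-level one; the TPM patch later in this section relies on that ordering to issue TPM2_Shutdown from tpm_class_shutdown(). Below is a simplified sketch of the resulting dispatch order, using stand-in struct definitions rather than the real driver-core types.]

#include <stdio.h>

/* Simplified stand-ins for struct class / struct bus_type / struct device. */
struct class    { int (*shutdown)(void *dev); };
struct bus_type { void (*shutdown)(void *dev); };

struct device {
	struct class    *class;
	struct bus_type *bus;
};

static int tpm_like_class_shutdown(void *dev)
{
	printf("class shutdown: issue TPM2_Shutdown, clear chip->ops\n");
	return 0;
}

static void bus_shutdown(void *dev)
{
	printf("bus shutdown\n");
}

static void device_shutdown_one(struct device *dev)
{
	/* The class hook wins; the bus hook is only a fallback. */
	if (dev->class && dev->class->shutdown)
		dev->class->shutdown(dev);
	else if (dev->bus && dev->bus->shutdown)
		dev->bus->shutdown(dev);
}

int main(void)
{
	struct class    tpm_class = { .shutdown = tpm_like_class_shutdown };
	struct bus_type some_bus  = { .shutdown = bus_shutdown };
	struct device   dev       = { .class = &tpm_class, .bus = &some_bus };

	device_shutdown_one(&dev);	/* takes the class path only */
	return 0;
}

[In the real patch, tpm_class_shutdown() then chains to the bus- or driver-level shutdown itself, after chip->ops has been cleared so no further TPM commands can be issued.]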
-#include <linux/swait.h> #include <generated/utsrelease.h> @@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. */ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - __fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -1163,6 +1159,28 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device, return 0; } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? 
*/ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1224,6 +1242,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index a102152301c8..97332d094fe2 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -866,7 +866,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; if (count > PATH_MAX) return -EINVAL; @@ -879,12 +879,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -895,8 +898,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); + ssize_t len; - return sprintf(buf, "%s\n", pdev->driver_override); + device_lock(dev); + len = sprintf(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index da49a8383dc3..d417ad1e639c 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1168,8 +1168,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); - dev_pm_domain_set(dev, &genpd->domain); - return gpd_data; err_free: @@ -1183,8 +1181,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { - dev_pm_domain_set(dev, NULL); - spin_lock_irq(&dev->power.lock); dev->power.subsys_data->domain_data = NULL; @@ -1221,6 +1217,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev_pm_domain_set(dev, &genpd->domain); + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1282,6 +1280,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev_pm_domain_set(dev, NULL); + list_del_init(&pdd->list_node); genpd_unlock(genpd); @@ -1393,7 +1393,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, struct generic_pm_domain *subdomain) { - struct gpd_link *link; + struct gpd_link *l, *link; int ret = -EINVAL; if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) @@ -1409,7 +1409,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, goto out; } - list_for_each_entry(link, &genpd->master_links, master_node) { + list_for_each_entry_safe(link, l, &genpd->master_links, master_node) { if (link->slave != subdomain) continue; @@ -1780,12 +1780,12 @@ EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); */ void of_genpd_del_provider(struct device_node *np) { - struct of_genpd_provider *cp; + struct of_genpd_provider 
*cp, *tmp; struct generic_pm_domain *gpd; mutex_lock(&gpd_list_lock); mutex_lock(&of_genpd_mutex); - list_for_each_entry(cp, &of_genpd_providers, link) { + list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { if (cp->node == np) { /* * For each PM domain associated with the @@ -1925,14 +1925,14 @@ EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); */ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) { - struct generic_pm_domain *gpd, *genpd = ERR_PTR(-ENOENT); + struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); int ret; if (IS_ERR_OR_NULL(np)) return ERR_PTR(-EINVAL); mutex_lock(&gpd_list_lock); - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { if (gpd->provider == &np->fwnode) { ret = genpd_remove(gpd); genpd = ret ? ERR_PTR(ret) : gpd; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 33b4b902741a..2df45ec8d935 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -272,6 +272,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? ret : n; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index c313b600d356..994bbf8b1476 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -60,6 +60,8 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +DEFINE_STATIC_SRCU(wakeup_srcu); + static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), @@ -198,7 +200,7 @@ void wakeup_source_remove(struct wakeup_source *ws) spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); - synchronize_rcu(); + synchronize_srcu(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -332,12 +334,12 @@ void device_wakeup_detach_irq(struct device *dev) void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_arm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -348,12 +350,12 @@ void device_wakeup_arm_wake_irqs(void) void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) dev_pm_disarm_wake_irq(ws->wakeirq); - - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -804,10 +806,10 @@ EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; - int active = 0; + int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_debug("active wakeup source: %s\n", ws->name); @@ -823,7 +825,7 @@ void pm_print_active_wakeup_sources(void) if (!active && last_activity_ws) pr_debug("last active wakeup source: %s\n", last_activity_ws->name); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); @@ -950,8 +952,9 @@ void 
pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { @@ -965,7 +968,7 @@ void pm_wakep_autosleep_enabled(bool set) } spin_unlock_irq(&ws->lock); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ @@ -1026,15 +1029,16 @@ static int print_wakeup_source_stats(struct seq_file *m, static int wakeup_sources_stats_show(struct seq_file *m, void *unused) { struct wakeup_source *ws; + int srcuidx; seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) print_wakeup_source_stats(m, ws); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); print_wakeup_source_stats(m, &deleted_ws); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 39459631667c..b49547c5f2c2 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2119,9 +2119,9 @@ static int blkfront_resume(struct xenbus_device *dev) /* * Get the bios in the request so we can re-queue them. */ - if (req_op(shadow[i].request) == REQ_OP_FLUSH || - req_op(shadow[i].request) == REQ_OP_DISCARD || - req_op(shadow[i].request) == REQ_OP_SECURE_ERASE || + if (req_op(shadow[j].request) == REQ_OP_FLUSH || + req_op(shadow[j].request) == REQ_OP_DISCARD || + req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || shadow[j].request->cmd_flags & REQ_FUA) { /* * Flush operations don't contain bios, so diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 9f699951b75a..49a7e9685e77 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3878,6 +3878,9 @@ static void smi_recv_tasklet(unsigned long val) * because the lower layer is allowed to hold locks while calling * message delivery. */ + + rcu_read_lock(); + if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -3900,6 +3903,8 @@ static void smi_recv_tasklet(unsigned long val) if (newmsg) intf->handlers->sender(intf->send_info, newmsg); + rcu_read_unlock(); + handle_new_recv_msgs(intf); } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0b22a9be5029..12d15212f0b2 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -761,6 +761,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, result, len, data[2]); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + /* + * Don't abort here, maybe it was a queued + * response to a previous command. 
+ */ + ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Invalid response getting flags: %x %x\n", data[0], data[1]); } else { diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index d165af8abe36..4161d9961a24 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1163,10 +1163,11 @@ static int wdog_reboot_handler(struct notifier_block *this, ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - /* Set a long timer to let the reboot happens, but - reboot if it hangs, but only if the watchdog + /* Set a long timer to let the reboot happen or + reset if it hangs, but only if the watchdog timer was already running. */ - timeout = 120; + if (timeout < 120) + timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 322b8a51ffc6..67ec9d3d04f5 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -142,6 +142,39 @@ static void tpm_devs_release(struct device *dev) put_device(&chip->dev); } +/** + * tpm_class_shutdown() - prepare the TPM device for loss of power. + * @dev: device to which the chip is associated. + * + * Issues a TPM2_Shutdown command prior to loss of power, as required by the + * TPM 2.0 spec. + * Then, calls bus- and device- specific shutdown code. + * + * XXX: This codepath relies on the fact that sysfs is not enabled for + * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2 + * has sysfs support enabled before TPM sysfs's implicit locking is fixed. + */ +static int tpm_class_shutdown(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + down_write(&chip->ops_sem); + tpm2_shutdown(chip, TPM2_SU_CLEAR); + chip->ops = NULL; + up_write(&chip->ops_sem); + } + /* Allow bus- and device-specific code to run. Note: since chip->ops + * is NULL, more-specific shutdown code will not be able to issue TPM + * commands. + */ + if (dev->bus && dev->bus->shutdown) + dev->bus->shutdown(dev); + else if (dev->driver && dev->driver->shutdown) + dev->driver->shutdown(dev); + return 0; +} + /** * tpm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: device to which the chip is associated @@ -181,6 +214,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev, device_initialize(&chip->devs); chip->dev.class = tpm_class; + chip->dev.class->shutdown = tpm_class_shutdown; chip->dev.release = tpm_dev_release; chip->dev.parent = pdev; chip->dev.groups = chip->groups; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 55405dbe43fa..c6fa9bef11cf 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -36,9 +36,10 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, ssize_t err; int i, rc; char *str = buf; - struct tpm_chip *chip = to_tpm_chip(dev); + memset(&tpm_cmd, 0, sizeof(tpm_cmd)); + tpm_cmd.header.in = tpm_readpubek_header; err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE, READ_PUBEK_RESULT_MIN_BODY_SIZE, 0, @@ -294,6 +295,9 @@ static const struct attribute_group tpm_dev_group = { void tpm_sysfs_add_device(struct tpm_chip *chip) { + /* XXX: If you wish to remove this restriction, you must first update + * tpm_sysfs to explicitly lock chip->ops. 
+ */ if (chip->flags & TPM_CHIP_FLAG_TPM2) return; diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c index 5372bf8be5e6..31d7ffda9aab 100644 --- a/drivers/clk/sunxi-ng/ccu-sun5i.c +++ b/drivers/clk/sunxi-ng/ccu-sun5i.c @@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = { .hw.init = CLK_HW_INIT_PARENTS("cpu", cpu_parents, &ccu_mux_ops, - CLK_IS_CRITICAL), + CLK_SET_RATE_PARENT | CLK_IS_CRITICAL), } }; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index eb1158532de3..0d82f3302df6 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -225,6 +225,9 @@ struct global_params { * @vid: Stores VID limits for this CPU * @pid: Stores PID parameters for this CPU * @last_sample_time: Last Sample time + * @aperf_mperf_shift: Number of clock cycles after which aperf/mperf are incremented + * This shift is a multiplier applied to the mperf delta to + * calculate CPU busy. * @prev_aperf: Last APERF value read from APERF MSR * @prev_mperf: Last MPERF value read from MPERF MSR * @prev_tsc: Last timestamp counter (TSC) value @@ -261,6 +264,7 @@ struct cpudata { u64 last_update; u64 last_sample_time; + u64 aperf_mperf_shift; u64 prev_aperf; u64 prev_mperf; u64 prev_tsc; @@ -323,6 +327,7 @@ struct pstate_funcs { int (*get_min)(void); int (*get_turbo)(void); int (*get_scaling)(void); + int (*get_aperf_mperf_shift)(void); u64 (*get_val)(struct cpudata*, int pstate); void (*get_vid)(struct cpudata *); void (*update_util)(struct update_util_data *data, u64 time, @@ -1485,6 +1490,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) return val; } +static int knl_get_aperf_mperf_shift(void) +{ + return 10; +} + static int knl_get_turbo_pstate(void) { u64 value; @@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling; cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling; + if (pstate_funcs.get_aperf_mperf_shift) + cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); + if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); @@ -1619,7 +1632,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) return cpu->pstate.turbo_pstate; - busy_frac = div_fp(sample->mperf, sample->tsc); + busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, + sample->tsc); boost = cpu->iowait_boost; cpu->iowait_boost >>= 1; @@ -1681,7 +1695,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns); perf_scaled = mul_fp(perf_scaled, sample_ratio); } else { - sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc); + sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift), + cpu->sample.tsc); if (sample_ratio < int_tofp(1)) perf_scaled = 0; } @@ -1824,6 +1839,7 @@ static const struct pstate_funcs knl_funcs = { .get_max_physical = core_get_max_pstate_physical, .get_min = core_get_min_pstate, .get_turbo = knl_get_turbo_pstate, + .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, .get_scaling = core_get_scaling, .get_val = core_get_val, .update_util = intel_pstate_update_util_pid, @@ -2408,6 +2424,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs) pstate_funcs.get_val = funcs->get_val; pstate_funcs.get_vid = funcs->get_vid; pstate_funcs.update_util = funcs->update_util; + pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
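[On Knights Landing the APERF/MPERF counters advance only once every 1024 TSC cycles, so the intel_pstate hunk above scales the mperf delta by 2^aperf_mperf_shift (10 on KNL, 0 elsewhere) before dividing by the TSC delta. A small fixed-point sketch of the corrected busy-fraction computation follows, with made-up counter deltas; FRAC_BITS matches intel_pstate's 8 fractional bits, but the sample values are illustrative only.]

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8	/* intel_pstate fixed point: 8 fractional bits */

static int64_t div_fp(int64_t x, int64_t y)
{
	return (x << FRAC_BITS) / y;
}

int main(void)
{
	/* Hypothetical per-sample deltas on a KNL-like CPU. */
	int64_t mperf_delta = 1500;	/* raw ticks: one per 1024 TSC cycles */
	int64_t tsc_delta = 2000000;	/* TSC cycles in the same window */
	int shift = 10;			/* knl_get_aperf_mperf_shift() */

	int64_t busy_unscaled = div_fp(mperf_delta, tsc_delta);
	int64_t busy_scaled = div_fp(mperf_delta << shift, tsc_delta);

	/* Without the shift the busy fraction rounds to ~0 and the CPU is
	 * kept at its minimum P-state; with it, the true ~77% load shows. */
	printf("unscaled busy_frac = %.4f\n",
	       (double)busy_unscaled / (1 << FRAC_BITS));
	printf("scaled   busy_frac = %.4f\n",
	       (double)busy_scaled / (1 << FRAC_BITS));
	return 0;
}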
intel_pstate_use_acpi_profile(); } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index fb1e60f5002e..778fc1bcccee 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -629,7 +629,7 @@ source "drivers/crypto/virtio/Kconfig" config CRYPTO_DEV_BCM_SPU tristate "Broadcom symmetric crypto/hash acceleration support" depends on ARCH_BCM_IPROC - depends on BCM_PDC_MBOX + depends on MAILBOX default m select CRYPTO_DES select CRYPTO_MD5 diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index a9482023d7d3..dad4e5bad827 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1204,7 +1204,9 @@ static int atmel_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = atmel_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index ef04c9748317..bf7ac621c591 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode, break; case HASH_ALG_SHA3_512: *spu2_type = SPU2_HASH_TYPE_SHA3_512; + break; case HASH_ALG_LAST: default: err = -EINVAL; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 398807d1b77e..d4a716326f67 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -882,10 +882,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -904,6 +904,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. This is used e.g. by the CTS mode. + */ + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); @@ -914,10 +922,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -935,6 +943,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. + */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); @@ -1475,8 +1491,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); struct device *jrdev = ctx->jrdev; - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | - CRYPTO_TFM_REQ_MAY_SLEEP)) ? + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct ablkcipher_edesc *edesc; diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index da4f94eab3da..718a03293ab6 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -396,7 +396,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 1bb2816a9b4d..c425d4adaf2a 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -149,7 +149,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 771dd26c7076..6719e346b790 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -1074,7 +1074,7 @@ static int aead_perform(struct aead_request *req, int encrypt, req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &crypt->icv_rev_aes); if (unlikely(!req_ctx->hmac_virt)) - goto free_buf_src; + goto free_buf_dst; if (!encrypt) { scatterwalk_map_and_copy(req_ctx->hmac_virt, req->src, cryptlen, authsize, 0); @@ -1089,10 +1089,10 @@ static int aead_perform(struct aead_request *req, int encrypt, BUG_ON(qmgr_stat_overflow(SEND_QID)); return -EINPROGRESS; -free_buf_src: - free_buf_chain(dev, req_ctx->src, crypt->src_buf); free_buf_dst: free_buf_chain(dev, req_ctx->dst, crypt->dst_buf); +free_buf_src: + free_buf_chain(dev, req_ctx->src, crypt->src_buf); crypt->ctl_flags = CTL_FLAG_UNUSED; return -ENOMEM; } diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 0bba6a19d36a..79791c690858 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -816,7 +816,7 @@ static void talitos_unregister_rng(struct device *dev) * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP */ #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) -#define TALITOS_MAX_KEY_SIZE 96 +#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ struct talitos_ctx { @@ -1495,6 +1495,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + if (keylen > TALITOS_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h index fdcd9769ffde..688b051750bd 100644 --- a/drivers/dax/device-dax.h +++ b/drivers/dax/device-dax.h @@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, unsigned int align, void *addr, unsigned long flags); struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - struct resource *res, int count); + int id, struct resource *res, int count); #endif /* __DEVICE_DAX_H__ */ diff --git 
a/drivers/dax/device.c b/drivers/dax/device.c index 006e657dfcb9..a54188e7c04a 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -528,7 +528,8 @@ static void dev_dax_release(struct device *dev) struct dax_region *dax_region = dev_dax->region; struct dax_device *dax_dev = dev_dax->dax_dev; - ida_simple_remove(&dax_region->ida, dev_dax->id); + if (dev_dax->id >= 0) + ida_simple_remove(&dax_region->ida, dev_dax->id); dax_region_put(dax_region); put_dax(dax_dev); kfree(dev_dax); @@ -558,7 +559,7 @@ static void unregister_dev_dax(void *dev) } struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - struct resource *res, int count) + int id, struct resource *res, int count) { struct device *parent = dax_region->dev; struct dax_device *dax_dev; @@ -586,10 +587,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, if (i < count) goto err_id; - dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); - if (dev_dax->id < 0) { - rc = dev_dax->id; - goto err_id; + if (id < 0) { + id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); + dev_dax->id = id; + if (id < 0) { + rc = id; + goto err_id; + } + } else { + /* region provider owns @id lifetime */ + dev_dax->id = -1; } /* @@ -619,7 +626,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, dev->parent = parent; dev->groups = dax_attribute_groups; dev->release = dev_dax_release; - dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); + dev_set_name(dev, "dax%d.%d", dax_region->id, id); rc = cdev_device_add(cdev, dev); if (rc) { @@ -635,7 +642,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, return dev_dax; err_dax: - ida_simple_remove(&dax_region->ida, dev_dax->id); + if (dev_dax->id >= 0) + ida_simple_remove(&dax_region->ida, dev_dax->id); err_id: kfree(dev_dax); diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 9f2a0b4fd801..8d8c852ba8f2 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data) static int dax_pmem_probe(struct device *dev) { - int rc; void *addr; struct resource res; + int rc, id, region_id; struct nd_pfn_sb *pfn_sb; struct dev_dax *dev_dax; struct dax_pmem *dax_pmem; - struct nd_region *nd_region; struct nd_namespace_io *nsio; struct dax_region *dax_region; struct nd_namespace_common *ndns; @@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev) /* adjust the dax_region resource to the start of data */ res.start += le64_to_cpu(pfn_sb->dataoff); - nd_region = to_nd_region(dev->parent); - dax_region = alloc_dax_region(dev, nd_region->id, &res, + rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); + if (rc != 2) + return -EINVAL; + + dax_region = alloc_dax_region(dev, region_id, &res, le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); if (!dax_region) return -ENOMEM; /* TODO: support for subdividing a dax region... 
*/ - dev_dax = devm_create_dev_dax(dax_region, &res, 1); + dev_dax = devm_create_dev_dax(dax_region, id, &res, 1); /* child dev_dax instances now own the lifetime of the dax_region */ dax_region_put(dax_region); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index b372aad3b449..045d6d311bde 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -528,7 +528,8 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, } } - efi_memattr_init(); + if (efi_enabled(EFI_MEMMAP)) + efi_memattr_init(); /* Parse the EFI Properties table if it exists */ if (efi.properties_table != EFI_INVALID_TABLE_ADDR) { diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a42a1eea5714..2e96b3d46e0c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) { struct lineevent_state *le = p; struct gpioevent_data ge; - int ret; + int ret, level; ge.timestamp = ktime_get_real_ns(); + level = gpiod_get_value_cansleep(le->desc); if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { - int level = gpiod_get_value_cansleep(le->desc); - if (level) /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; else /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) { /* Emit low-to-high event */ ge.id = GPIOEVENT_EVENT_RISING_EDGE; - } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { + } else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) { /* Emit high-to-low event */ ge.id = GPIOEVENT_EVENT_FALLING_EDGE; } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index c6dba1eaefbd..f4a4efec8737 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -838,6 +838,10 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, return -EINVAL; mode_info = info->mode_info; + if (mode_info) { + /* always set the reference clock */ + mode_info->ref_clock = adev->clock.spll.reference_freq; + } if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index f5ae871aa11c..9c9d39018bd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -208,7 +208,8 @@ static void amdgpu_pp_late_fini(void *handle) if (adev->pp_enabled && adev->pm.dpm_enabled) amdgpu_pm_sysfs_fini(adev); - amd_powerplay_destroy(adev->powerplay.pp_handle); + if (adev->pp_enabled) + amd_powerplay_destroy(adev->powerplay.pp_handle); } static int amdgpu_pp_suspend(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index ac5e92e5d59d..99f575070943 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -319,14 +319,11 @@ static int psp_load_fw(struct amdgpu_device *adev) { int ret; struct psp_context *psp = &adev->psp; - struct psp_gfx_cmd_resp *cmd; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) + psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!psp->cmd) return -ENOMEM; - psp->cmd = cmd; - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, 
PSP_1_MEG, AMDGPU_GEM_DOMAIN_GTT, &psp->fw_pri_bo, @@ -365,8 +362,6 @@ static int psp_load_fw(struct amdgpu_device *adev) if (ret) goto failed_mem; - kfree(cmd); - return 0; failed_mem: @@ -376,7 +371,8 @@ static int psp_load_fw(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&psp->fw_pri_bo, &psp->fw_pri_mc_addr, &psp->fw_pri_buf); failed: - kfree(cmd); + kfree(psp->cmd); + psp->cmd = NULL; return ret; } @@ -436,6 +432,9 @@ static int psp_hw_fini(void *handle) amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); + kfree(psp->cmd); + psp->cmd = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index ed814e6d0207..28c1112e520c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct dma_fence *f = e->fence; struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + if (dma_fence_is_signaled(f)) { + hash_del(&e->node); + dma_fence_put(f); + kmem_cache_free(amdgpu_sync_slab, e); + continue; + } if (ring && s_fence) { /* For fences from the same ring it is sufficient * when they are scheduled. @@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, } } - if (dma_fence_is_signaled(f)) { - hash_del(&e->node); - dma_fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - return f; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5db0230e45c6..e06c8a718310 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1462,6 +1462,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (*pos >= adev->mc.mc_vram_size) + return -ENXIO; + while (size) { unsigned long flags; uint32_t value; diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h index 18fd01f3e4b2..003a131bad47 100644 --- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h +++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h @@ -1,24 +1,25 @@ - /* -*************************************************************************************************** -* -* Trade secret of Advanced Micro Devices, Inc. -* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished) -* -* All rights reserved. This notice is intended as a precaution against inadvertent publication and -* does not imply publication or any waiver of confidentiality. The year included in the foregoing -* notice is the year of creation of the work. -* -*************************************************************************************************** -*/ -/** -*************************************************************************************************** -* @brief gfx9 Clearstate Definitions -*************************************************************************************************** -* -* Do not edit! This is a machine-generated file! -* -*/ + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ static const unsigned int gfx9_SECT_CONTEXT_def_1[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index a125f9d44577..da2fe803e22f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1688,7 +1688,8 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); - mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); + adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); + mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; adev->gfx.config.mem_max_burst_length_bytes = 256; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 758d636a6f52..88b76cdfc9b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1907,46 +1907,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_backends_per_se = 2; - - switch (adev->pdev->revision) { - case 0xc4: - case 0x84: - case 0xc8: - case 0xcc: - case 0xe1: - case 0xe3: - /* B10 */ - adev->gfx.config.max_cu_per_sh = 8; - break; - case 0xc5: - case 0x81: - case 0x85: - case 0xc9: - case 0xcd: - case 0xe2: - case 0xe4: - /* B8 */ - adev->gfx.config.max_cu_per_sh = 6; - break; - case 0xc6: - case 0xca: - case 0xce: - case 0x88: - case 0xe6: - /* B6 */ - adev->gfx.config.max_cu_per_sh = 6; - break; - case 0xc7: - case 0x87: - case 0xcb: - case 0xe5: - case 0x89: - default: - /* B4 */ - adev->gfx.config.max_cu_per_sh = 4; - break; - } - + adev->gfx.config.max_cu_per_sh = 8; adev->gfx.config.max_texture_channel_caches = 2; adev->gfx.config.max_gprs = 256; adev->gfx.config.max_gs_threads = 32; @@ -1963,35 +1924,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.max_tile_pipes = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_backends_per_se = 1; - - switch (adev->pdev->revision) { - case 0x80: - case 0x81: - case 0xc0: - case 0xc1: - case 0xc2: - case 0xc4: - case 0xc8: - case 0xc9: - case 0xd6: - case 0xda: - case 0xe9: - case 0xea: - adev->gfx.config.max_cu_per_sh = 3; - break; - case 0x83: - case 0xd0: - case 0xd1: - case 0xd2: - case 0xd4: - case 0xdb: - case 0xe1: - 
case 0xe2: - default: - adev->gfx.config.max_cu_per_sh = 2; - break; - } - + adev->gfx.config.max_cu_per_sh = 3; adev->gfx.config.max_texture_channel_caches = 2; adev->gfx.config.max_gprs = 256; adev->gfx.config.max_gs_threads = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index c0b1aabf282f..7dbb7cf47986 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, pitcairn_mgcg_cgcg_init, (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + break; case CHIP_VERDE: amdgpu_program_register_sequence(adev, verde_golden_registers, @@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev) amdgpu_program_register_sequence(adev, oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); + break; case CHIP_HAINAN: amdgpu_program_register_sequence(adev, hainan_golden_registers, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 7aa5ca815a3a..0b74da3dca8b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1224,6 +1224,12 @@ static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management)); phm_destroy_table(hwmgr, &(hwmgr->power_down_asic)); phm_destroy_table(hwmgr, &(hwmgr->setup_asic)); + + if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + } + kfree(hwmgr->backend); hwmgr->backend = NULL; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 5c26488e7a2d..0529e500c534 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) /* port@2 is the output port */ ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); - if (ret) + if (ret && ret != -ENODEV) return ret; /* Shut down GPIO is optional */ diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f32506a7c1d6..422404dbfabb 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1581,6 +1581,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state) if (config->funcs->atomic_check) ret = config->funcs->atomic_check(state->dev, state); + if (ret) + return ret; + if (!state->allow_modeset) { for_each_new_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) { @@ -1591,7 +1594,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state) } } - return ret; + return 0; } EXPORT_SYMBOL(drm_atomic_check_only); @@ -2093,10 +2096,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; struct drm_plane *plane; - struct drm_out_fence_state *fence_state = NULL; + struct drm_out_fence_state *fence_state; unsigned plane_mask; int ret = 0; - unsigned int i, j, num_fences = 0; + unsigned int i, j, num_fences; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) @@ -2137,6 +2140,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, plane_mask = 0; copied_objs = 0; copied_props = 0; + fence_state = NULL; + num_fences = 0; for (i = 0; i < arg->count_objs; i++) { uint32_t obj_id, 
count_props; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index d3fc7e4e85b7..e86d3a5badf4 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, return false; } + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + /* get length contained in this portion */ msg->curchunk_len = recv_hdr.msg_len; msg->curchunk_hdrlen = hdrlen; @@ -2164,7 +2171,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); -static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) { int len; u8 replyblock[32]; @@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) replyblock, len); if (ret != len) { DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); - return; + return false; } ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); if (!ret) { DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); - return; + return false; } replylen = msg->curchunk_len + msg->curchunk_hdrlen; @@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read a chunk\n"); + DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", + len, ret); + return false; } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); - if (ret == false) + if (!ret) { DRM_DEBUG_KMS("failed to build sideband msg\n"); + return false; + } + curreply += len; replylen -= len; } + return true; } static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, false); + if (!drm_dp_get_one_sb_msg(mgr, false)) { + memset(&mgr->down_rep_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->down_rep_recv.have_eomt) { struct drm_dp_sideband_msg_tx *txmsg; @@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, true); + + if (!drm_dp_get_one_sb_msg(mgr, true)) { + memset(&mgr->up_req_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; @@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } - drm_dp_put_mst_branch_device(mstb); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); } return ret; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index fc8ef42203ec..b3ef4f1c2630 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -832,6 +832,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) { + drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } diff --git 
a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index b1e28c944637..08e6e981104f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_gem_object *obj = ptr; struct drm_device *dev = obj->dev; + if (dev->driver->gem_close_object) + dev->driver->gem_close_object(obj, file_priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_gem_remove_prime_handles(obj, file_priv); drm_vma_node_revoke(&obj->vma_node, file_priv); - if (dev->driver->gem_close_object) - dev->driver->gem_close_object(obj, file_priv); - drm_gem_object_handle_put_unlocked(obj); return 0; diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 5dc8c4350602..e40c12fabbde 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, crtc = drm_crtc_find(dev, plane_req->crtc_id); if (!crtc) { + drm_framebuffer_put(fb); DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); return -ENOENT; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 5255278dde56..91e17aeee1da 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -495,6 +495,7 @@ static struct drm_driver etnaviv_drm_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = drm_gem_prime_export, .gem_prime_import = drm_gem_prime_import, + .gem_prime_res_obj = etnaviv_gem_prime_res_obj, .gem_prime_pin = etnaviv_gem_prime_pin, .gem_prime_unpin = etnaviv_gem_prime_unpin, .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index e41f38667c1c..058389f93b69 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -80,6 +80,7 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj); void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int etnaviv_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); +struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj); struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); int etnaviv_gem_prime_pin(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 62b47972a52e..abed6f781281 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -150,3 +150,10 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, return ERR_PTR(ret); } + +struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj) +{ + struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); + + return etnaviv_obj->resv; +} diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1013765274da..0ceed22187df 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, if (ret) return ret; - if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { - DRM_ERROR("relocation %u outside object", i); + if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) { + DRM_ERROR("relocation %u 
outside object\n", i); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 41b2c3aaa04a..37258b7d1bce 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2754,7 +2754,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) unmap_src: i915_gem_object_unpin_map(obj); put_obj: - i915_gem_object_put(wa_ctx->indirect_ctx.obj); + i915_gem_object_put(obj); return ret; } diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1ae0b4083ce1..fd0c85f9ef3c 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) struct device *dev = mdev_dev(vgpu->vdev.mdev); unsigned long gfn; - mutex_lock(&vgpu->vdev.cache_lock); - while ((node = rb_first(&vgpu->vdev.cache))) { + for (;;) { + mutex_lock(&vgpu->vdev.cache_lock); + node = rb_first(&vgpu->vdev.cache); + if (!node) { + mutex_unlock(&vgpu->vdev.cache_lock); + break; + } dma = rb_entry(node, struct gvt_dma, node); gvt_dma_unmap_iova(vgpu, dma->iova); gfn = dma->gfn; - - vfio_unpin_pages(dev, &gfn, 1); __gvt_cache_remove_entry(vgpu, dma); + mutex_unlock(&vgpu->vdev.cache_lock); + vfio_unpin_pages(dev, &gfn, 1); } - mutex_unlock(&vgpu->vdev.cache_lock); } static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4bd1467c17b1..0341061fbb34 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3018,7 +3018,7 @@ static void intel_connector_info(struct seq_file *m, connector->display_info.cea_rev); } - if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + if (!intel_encoder) return; switch (connector->connector_type) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 48428672fc6e..6ac8d37e7ab8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1087,10 +1087,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * and the registers being closely associated. * * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. + * be lost or delayed, and was defeatured. MSI interrupts seem to + * get lost on g4x as well, and interrupt delivery seems to stay + * properly dead afterwards. So we'll just disable them for all + * pre-gen5 chipsets. 
*/ - if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) DRM_DEBUG_DRIVER("can't enable MSI"); } diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 7032c542a9b1..4dd4c2159a92 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req) goto err_unpin; } + ret = req->engine->emit_flush(req, EMIT_INVALIDATE); + if (ret) + goto err_unpin; + ret = req->engine->emit_bb_start(req, so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 060b171480d5..aac84dda532c 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1210,10 +1210,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return ret; } - ret = alloc_oa_buffer(dev_priv); - if (ret) - goto err_oa_buf_alloc; - /* PRM - observability performance counters: * * OACONTROL, performance counter enable, note: @@ -1229,6 +1225,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = alloc_oa_buffer(dev_priv); + if (ret) + goto err_oa_buf_alloc; + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); if (ret) goto err_enable; @@ -1240,11 +1240,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return 0; err_enable: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); free_oa_buffer(dev_priv); err_oa_buf_alloc: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); if (stream->ctx) oa_put_render_ctx_id(stream); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 639d45c1dd2e..7ea7fd1e8856 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; uint8_t aux_channel, ddc_pin; /* Each DDI port can have more than one value on the "DVO Port" field, - * so look for all the possible values for each port and abort if more - * than one is found. */ + * so look for all the possible values for each port. + */ int dvo_ports[][3] = { {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, @@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, }; - /* Find the child device to use, abort if more than one found. */ + /* + * Find the first child device to reference the port, report if more + * than one found. 
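+ * (Editor's note: with this change the loop below keeps the first matching
+ * child in 'child' and only logs later duplicates, where the old code
+ * returned from parse_ddi_port() early and left the port unconfigured.)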
+ */ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { it = dev_priv->vbt.child_dev + i; @@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (it->common.dvo_port == dvo_ports[port][j]) { if (child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", + DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", port_name(port)); - return; + } else { + child = it; } - child = it; } } } diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index f29a226e24d8..6808f82ffb5d 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -491,6 +491,14 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_state->cdclk; u32 val, cmd; + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ cmd = 2; else if (cdclk == 266667) @@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, return; } + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + /* * Specs are full of misinformation, but testing on actual * hardware has shown that we just need to write the desired @@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static int bdw_calc_cdclk(int max_pixclk) diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 306c6b06b330..17c4ae7e4e7c 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset) } /* Program the max register to clamp values > 1.0. 
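 * (Editor's note: without the assignment added below, the PREC_PAL_GC_MAX
 * writes would index lut[] with whatever value the preceding loop left in
 * i, one past the last entry; re-deriving i = lut_size - 1 makes the clamp
 * registers load from the last valid LUT entry.)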
*/ + i = lut_size - 1; I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), drm_color_lut_extract(lut[i].red, 16)); I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9106ea32b048..881df8843e66 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9085,6 +9085,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, u64 power_domain_mask; bool active; + if (INTEL_GEN(dev_priv) >= 9) { + intel_crtc_init_scalers(crtc, pipe_config); + + pipe_config->scaler_state.scaler_id = -1; + pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); + } + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; @@ -9113,13 +9120,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_GEN(dev_priv) >= 9) { - intel_crtc_init_scalers(crtc, pipe_config); - - pipe_config->scaler_state.scaler_id = -1; - pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); - } - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT_ULL(power_domain); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 332254a8eebe..e09f3690e299 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -535,13 +535,14 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) drm_fb_helper_fini(&ifbdev->helper); - if (ifbdev->fb) { + if (ifbdev->vma) { mutex_lock(&ifbdev->helper.dev->struct_mutex); intel_unpin_fb_vma(ifbdev->vma); mutex_unlock(&ifbdev->helper.dev->struct_mutex); + } + if (ifbdev->fb) drm_framebuffer_remove(&ifbdev->fb->base); - } kfree(ifbdev); } @@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; - if (!ifbdev || !ifbdev->fb) + if (!ifbdev || !ifbdev->vma) return; info = ifbdev->helper.fbdev; @@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev && ifbdev->fb) + if (ifbdev && ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } @@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) return; intel_fbdev_sync(ifbdev); - if (!ifbdev->fb) + if (!ifbdev->vma) return; if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0) diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 636031a30e17..8aca20209cb8 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) /* port@1 is the output port */ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge); - if (ret) + if (ret && ret != -ENODEV) return ret; imxpd->dev = dev; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index 1e1de6bfe85a..5893be9788d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -27,7 +27,7 @@ struct nv50_disp { u8 type[3]; } pior; - struct nv50_disp_chan *chan[17]; + struct nv50_disp_chan *chan[21]; }; int 
nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c index c794b2c2d21e..6d8f21290aa2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c @@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base) if (bar->bar[0].mem) { addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); + nvkm_wr32(device, 0x001714, 0x80000000 | addr); } return 0; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index fa4f8f008e4d..e67ed383e11b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -31,6 +31,7 @@ #include "radeon_asic.h" #include "atom.h" #include <linux/backlight.h> +#include <linux/dmi.h> extern int atom_debug; @@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) goto assigned; } - /* on DCE32 and encoder can driver any block so just crtc id */ + /* + * On DCE32 any encoder can drive any block so usually just use crtc id, + * but Apple thinks different, at least on the iMac10,1, so use linkb there; + * otherwise the internal eDP panel will stay dark. + */ if (ASIC_IS_DCE32(rdev)) { - enc_idx = radeon_crtc->crtc_id; + if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) + enc_idx = (dig->linkb) ? 1 : 0; + else + enc_idx = radeon_crtc->crtc_id; + goto assigned; } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index ea36dc4dd5d2..24810492d2c1 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev) u32 vblank_time = r600_dpm_get_vblank_time(rdev); u32 switch_limit = pi->mem_gddr5 ?
450 : 300; + /* disable mclk switching if the refresh is >120Hz, even if the + * blanking period would allow it + */ + if (r600_dpm_get_vrefresh(rdev) > 120) + return true; + /* disable mclk switching if the refresh is >120Hz, even if the * blanking period would allow it */ diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 8ddd72cd5873..05601ab27d7c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -25,12 +25,20 @@ #include "sun4i_framebuffer.h" #include "sun4i_tcon.h" +static void sun4i_drv_lastclose(struct drm_device *dev) +{ + struct sun4i_drv *drv = dev->dev_private; + + drm_fbdev_cma_restore_mode(drv->fbdev); +} + DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); static struct drm_driver sun4i_drv_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, /* Generic Operations */ + .lastclose = sun4i_drv_lastclose, .fops = &sun4i_drv_fops, .name = "sun4i-drm", .desc = "Allwinner sun4i Display Engine", diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a6d7fcb99c0b..22b57020790d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1353,7 +1353,6 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } - dma_fence_put(man->move); man->use_type = false; man->has_type = false; @@ -1369,6 +1368,9 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) ret = (*man->func->takedown)(man); } + dma_fence_put(man->move); + man->move = NULL; + return ret; } EXPORT_SYMBOL(ttm_bo_clean_mm); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index c7b53d987f06..fefb9d995d2c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { - return capable(CAP_SYS_ADMIN) ? 
: -EINVAL; + return -EINVAL; } static int vmw_cmd_ok(struct vmw_private *dev_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1d2db5d912b0..f8a977f86ec7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; + + if (plane->fb) { + hotspot_x += plane->fb->hot_x; + hotspot_y += plane->fb->hot_y; + } + du->cursor_surface = vps->surf; du->cursor_dmabuf = vps->dmabuf; @@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, du->cursor_y + hotspot_y); + + du->core_hotspot_x = hotspot_x - du->hotspot_x; + du->core_hotspot_y = hotspot_y - du->hotspot_y; } else { DRM_ERROR("Failed to update cursor image\n"); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 50be1f034f9e..5284e8d2f7ba 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) * something arbitrarily large and we will reject any layout * that doesn't fit prim_bb_mem later */ - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; } vmw_kms_create_implicit_placement_property(dev_priv, false); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 24d5b6deb571..be5bea15ce52 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -595,16 +595,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, return 0; } -static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi, - struct hid_field *field, struct hid_usage *usage, - unsigned long **bit, int *max) -{ - if (usage->type == EV_KEY || usage->type == EV_ABS) - set_bit(usage->type, hi->input->evbit); - - return -1; -} - static int mt_compute_slot(struct mt_device *td, struct input_dev *input) { __s32 quirks = td->mtclass.quirks; @@ -905,8 +895,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, return 0; if (field->application == HID_DG_TOUCHSCREEN || - field->application == HID_DG_TOUCHPAD) - return mt_touch_input_mapped(hdev, hi, field, usage, bit, max); + field->application == HID_DG_TOUCHPAD) { + /* We own these mappings, tell hid-input to ignore them */ + return -1; + } /* let hid-core decide for the others */ return 0; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 736ac76d2a6a..3cea1216754e 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel) get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); out: + /* re-enable tasklet for use on re-open */ + tasklet_enable(&channel->callback_event); return ret; } diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c index 12e7baecb84e..f8c761fb71ad 100644 --- a/drivers/hv/hv.c +++ b/drivers/hv/hv.c @@ -82,10 +82,15 @@ int hv_post_message(union hv_connection_id connection_id, aligned_msg->message_type = message_type; aligned_msg->payload_size = payload_size; memcpy((void *)aligned_msg->payload, payload, payload_size); - put_cpu_ptr(hv_cpu); status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL); + /* Preemption must remain disabled until after the 
hypercall + * so some other thread can't get scheduled onto this cpu and + * corrupt the per-cpu post_msg_page + */ + put_cpu_ptr(hv_cpu); + return status & 0xFFFF; } diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index d1263b82d646..eea71c4e969d 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -254,6 +254,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) } acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev); + /* Some broken DSDTs use 1MiHz instead of 1MHz */ + if (acpi_speed == 1048576) + acpi_speed = 1000000; /* * Find bus speed from the "clock-frequency" device property, ACPI * or by using fast mode if neither is set. @@ -389,7 +392,7 @@ static void dw_i2c_plat_complete(struct device *dev) #endif #ifdef CONFIG_PM -static int dw_i2c_plat_suspend(struct device *dev) +static int dw_i2c_plat_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); @@ -411,11 +414,21 @@ static int dw_i2c_plat_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int dw_i2c_plat_suspend(struct device *dev) +{ + pm_runtime_resume(dev); + return dw_i2c_plat_runtime_suspend(dev); +} +#endif + static const struct dev_pm_ops dw_i2c_dev_pm_ops = { .prepare = dw_i2c_plat_prepare, .complete = dw_i2c_plat_complete, SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume) - SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL) + SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, + dw_i2c_plat_resume, + NULL) }; #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops) diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index 6b5d3be283c4..807299dd45eb 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -193,7 +193,6 @@ struct bmc150_accel_data { struct regmap *regmap; int irq; struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS]; - atomic_t active_intr; struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS]; struct mutex mutex; u8 fifo_mode, watermark; @@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i, goto out_fix_power_state; } - if (state) - atomic_inc(&data->active_intr); - else - atomic_dec(&data->active_intr); - return 0; out_fix_power_state: @@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev) struct bmc150_accel_data *data = iio_priv(indio_dev); mutex_lock(&data->mutex); - if (atomic_read(&data->active_intr)) - bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); + bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0); bmc150_accel_fifo_set_mode(data); mutex_unlock(&data->mutex); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 784670e2736b..2ee3ae11eb2a 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -316,6
+324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .en_mask = 0x08, }, }, + .sim = { + .addr = 0x24, + .value = BIT(0), + }, .multi_read_bit = false, .bootime = 2, }, @@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = true, .bootime = 2, /* guess */ }, @@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(7), + }, .multi_read_bit = false, .bootime = 2, /* guess */ }, @@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .addr_ihl = 0x22, .mask_ihl = 0x80, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, @@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_int1 = 0x04, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x21, + .value = BIT(1), + }, .multi_read_bit = false, .bootime = 2, }, @@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .mask_ihl = 0x02, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, + .sim = { + .addr = 0x23, + .value = BIT(0), + }, .multi_read_bit = true, .bootime = 2, }, diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 62670cbfa2bb..87fd6e0ce5ee 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -22,6 +22,7 @@ #include <linux/iio/iio.h> #include <linux/iio/driver.h> +#include <linux/iopoll.h> #define ASPEED_RESOLUTION_BITS 10 #define ASPEED_CLOCKS_PER_SAMPLE 12 @@ -38,11 +39,17 @@ #define ASPEED_ENGINE_ENABLE BIT(0) +#define ASPEED_ADC_CTRL_INIT_RDY BIT(8) + +#define ASPEED_ADC_INIT_POLLING_TIME 500 +#define ASPEED_ADC_INIT_TIMEOUT 500000 + struct aspeed_adc_model_data { const char *model_name; unsigned int min_sampling_rate; // Hz unsigned int max_sampling_rate; // Hz unsigned int vref_voltage; // mV + bool wait_init_sequence; }; struct aspeed_adc_data { @@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev) goto scaler_error; } + model_data = of_device_get_match_data(&pdev->dev); + + if (model_data->wait_init_sequence) { + /* Enable engine in normal mode. */ + writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE, + data->base + ASPEED_REG_ENGINE_CONTROL); + + /* Wait for initial sequence complete. */ + ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL, + adc_engine_control_reg_val, + adc_engine_control_reg_val & + ASPEED_ADC_CTRL_INIT_RDY, + ASPEED_ADC_INIT_POLLING_TIME, + ASPEED_ADC_INIT_TIMEOUT); + if (ret) + goto scaler_error; + } + /* Start all channels in normal mode. 
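 * (Editor's note: in the init-sequence wait added above, readl_poll_timeout()
 * re-reads ENGINE_CONTROL every ASPEED_ADC_INIT_POLLING_TIME microseconds
 * until ASPEED_ADC_CTRL_INIT_RDY is set, and returns -ETIMEDOUT after
 * ASPEED_ADC_INIT_TIMEOUT microseconds, which takes the scaler_error
 * unwind path.)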
*/ clk_prepare_enable(data->clk_scaler->clk); adc_engine_control_reg_val = GENMASK(31, 16) | @@ -270,6 +295,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = { .vref_voltage = 1800, // mV .min_sampling_rate = 1, .max_sampling_rate = 1000000, + .wait_init_sequence = true, }; static const struct of_device_id aspeed_adc_matches[] = { diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 64799ad7ebad..7fd24949c0c1 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -28,6 +28,8 @@ #include <linux/iio/driver.h> #define AXP288_ADC_EN_MASK 0xF1 +#define AXP288_ADC_TS_PIN_GPADC 0xF2 +#define AXP288_ADC_TS_PIN_ON 0xF3 enum axp288_adc_id { AXP288_ADC_TS, @@ -121,6 +123,16 @@ static int axp288_adc_read_channel(int *val, unsigned long address, return IIO_VAL_INT; } +static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, + unsigned long address) +{ + /* channels other than GPADC do not need to switch TS pin */ + if (address != AXP288_GP_ADC_H) + return 0; + + return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); +} + static int axp288_adc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -131,7 +143,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); switch (mask) { case IIO_CHAN_INFO_RAW: + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, + chan->address)) { + dev_err(&indio_dev->dev, "GPADC mode\n"); + ret = -EINVAL; + break; + } ret = axp288_adc_read_channel(val, chan->address, info->regmap); + if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, + chan->address)) + dev_err(&indio_dev->dev, "TS pin restore\n"); break; default: ret = -EINVAL; @@ -141,6 +162,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, return ret; } +static int axp288_adc_set_state(struct regmap *regmap) +{ + /* ADC should be always enabled for internal FG to function */ + if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) + return -EIO; + + return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); +} + static const struct iio_info axp288_adc_iio_info = { .read_raw = &axp288_adc_read_raw, .driver_module = THIS_MODULE, @@ -169,7 +199,7 @@ static int axp288_adc_probe(struct platform_device *pdev) * Set ADC to enabled state at all time, including system suspend. * otherwise internal fuel gauge functionality may be affected. 
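 * (Editor's note: axp288_adc_set_state() introduced above writes
 * AXP288_ADC_TS_PIN_ON to AXP288_ADC_TS_PIN_CTRL before enabling the
 * channels via AXP20X_ADC_EN1, so the TS pin stays in its always-on mode
 * and the fuel gauge keeps a usable battery-temperature reading.)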
*/ - ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); + ret = axp288_adc_set_state(axp20x->regmap); if (ret) { dev_err(&pdev->dev, "unable to enable ADC device\n"); return ret; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 01fc76f7d660..c168e0db329a 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -77,7 +77,7 @@ #define VF610_ADC_ADSTS_MASK 0x300 #define VF610_ADC_ADLPC_EN 0x80 #define VF610_ADC_ADHSC_EN 0x400 -#define VF610_ADC_REFSEL_VALT 0x100 +#define VF610_ADC_REFSEL_VALT 0x800 #define VF610_ADC_REFSEL_VBG 0x1000 #define VF610_ADC_ADTRG_HARD 0x2000 #define VF610_ADC_AVGS_8 0x4000 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index 0b5dea050239..6dda332f252a 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c @@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) s32 poll_value = 0; if (state) { - if (!atomic_read(&st->user_requested_state)) - return 0; if (sensor_hub_device_open(st->hsdev)) return -EIO; @@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) &report_val); } + pr_debug("HID_SENSOR %s set power_state %d report_state %d\n", + st->pdev->name, state_val, report_val); + sensor_hub_get_feature(st->hsdev, st->power_state.report_id, st->power_state.index, sizeof(state_val), &state_val); @@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state) ret = pm_runtime_get_sync(&st->pdev->dev); else { pm_runtime_mark_last_busy(&st->pdev->dev); + pm_runtime_use_autosuspend(&st->pdev->dev); ret = pm_runtime_put_autosuspend(&st->pdev->dev); } if (ret < 0) { @@ -205,8 +207,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name, /* Default to 3 seconds, but can be changed from sysfs */ pm_runtime_set_autosuspend_delay(&attrb->pdev->dev, 3000); - pm_runtime_use_autosuspend(&attrb->pdev->dev); - return ret; error_unreg_trigger: iio_trigger_unregister(trig); diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 79c8c7cd70d5..6e6a1ecc99dd 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -550,6 +550,31 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, } EXPORT_SYMBOL(st_sensors_read_info_raw); +static int st_sensors_init_interface_mode(struct iio_dev *indio_dev, + const struct st_sensor_settings *sensor_settings) +{ + struct st_sensor_data *sdata = iio_priv(indio_dev); + struct device_node *np = sdata->dev->of_node; + struct st_sensors_platform_data *pdata; + + pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data; + if (((np && of_property_read_bool(np, "spi-3wire")) || + (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) { + int err; + + err = sdata->tf->write_byte(&sdata->tb, sdata->dev, + sensor_settings->sim.addr, + sensor_settings->sim.value); + if (err < 0) { + dev_err(&indio_dev->dev, + "failed to init interface mode\n"); + return err; + } + } + + return 0; +} + int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensor_settings *sensor_settings) @@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev, return -ENODEV; } + err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]); + if (err 
< 0) + return err; + if (sensor_settings[i].wai_addr) { err = sdata->tf->read_byte(&sdata->tb, sdata->dev, sensor_settings[i].wai_addr, &wai); diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c index 8cf84d3488b2..12898424d838 100644 --- a/drivers/iio/imu/adis16480.c +++ b/drivers/iio/imu/adis16480.c @@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = { .gyro_max_val = IIO_RAD_TO_DEGREE(22500), .gyro_max_scale = 450, .accel_max_val = IIO_M_S_2_TO_G(12500), - .accel_max_scale = 5, + .accel_max_scale = 10, }, [ADIS16485] = { .channels = adis16485_channels, diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c index e7d4ea75e007..7599693f7fe9 100644 --- a/drivers/iio/light/tsl2563.c +++ b/drivers/iio/light/tsl2563.c @@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private) struct tsl2563_chip *chip = iio_priv(dev_info); iio_push_event(dev_info, - IIO_UNMOD_EVENT_CODE(IIO_LIGHT, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER), diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 8e1b0861fbe4..c38563699984 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c @@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = { .drdy_irq = { .addr = 0x62, .mask_int1 = 0x01, - .addr_ihl = 0x63, - .mask_ihl = 0x04, - .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, + .addr_stat_drdy = 0x67, }, .multi_read_bit = false, .bootime = 2, diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index fd0edca0e656..99448012b47f 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { .mask_od = 0x40, .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR, }, - .multi_read_bit = true, + .multi_read_bit = false, .bootime = 2, }, }; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index ece6926fa2e6..5da2ac7cf61a 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -510,6 +510,11 @@ static int addr_resolve(struct sockaddr *src_in, struct dst_entry *dst; int ret; + if (!addr->net) { + pr_warn_ratelimited("%s: missing namespace\n", __func__); + return -EINVAL; + } + if (src_in->sa_family == AF_INET) { struct rtable *rt = NULL; const struct sockaddr_in *dst_in4 = @@ -547,7 +552,6 @@ static int addr_resolve(struct sockaddr *src_in, } addr->bound_dev_if = ndev->ifindex; - addr->net = dev_net(ndev); dev_put(ndev); return ret; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 31bb82d8ecd7..d65a09317719 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1044,6 +1044,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); + qp_attr->port_num = id_priv->id.port_num; + *qp_attr_mask |= IB_QP_PORT; } else ret = -ENOSYS; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 70b7fb156414..5ea2d80800f9 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, cq->uobject = &obj->uobject; cq->comp_handler = ib_uverbs_comp_handler; cq->event_handler = 
ib_uverbs_cq_event_handler; - cq->cq_context = &ev_file->ev_queue; + cq->cq_context = ev_file ? &ev_file->ev_queue : NULL; atomic_set(&cq->usecnt, 0); obj->uobject.object = cq; @@ -1931,6 +1931,12 @@ static int modify_qp(struct ib_uverbs_file *file, goto out; } + if ((cmd->base.attr_mask & IB_QP_PORT) && + !rdma_is_port_valid(qp->device, cmd->base.port_num)) { + ret = -EINVAL; + goto release_qp; + } + attr->qp_state = cmd->base.qp_state; attr->cur_qp_state = cmd->base.cur_qp_state; attr->path_mtu = cmd->base.path_mtu; @@ -2541,6 +2547,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) + return -EINVAL; + INIT_UDATA(&udata, buf + sizeof(cmd), (unsigned long)cmd.response + sizeof(resp), in_len - sizeof(cmd), out_len - sizeof(resp)); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 366433f71b58..08512d4c8e89 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1779,7 +1779,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { - if (unlikely(i > mr->max_descs)) + if (unlikely(i >= mr->max_descs)) break; klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 5a887efb4bdf..37b33d708c2d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht; static struct iscsi_transport iscsi_iser_transport; static struct scsi_transport_template *iscsi_iser_scsi_transport; static struct workqueue_struct *release_wq; +static DEFINE_MUTEX(unbind_iser_conn_mutex); struct iser_global ig; int iser_debug_level = 0; @@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) */ if (iser_conn) { mutex_lock(&iser_conn->state_mutex); + mutex_lock(&unbind_iser_conn_mutex); iser_conn_terminate(iser_conn); iscsi_conn_stop(cls_conn, flag); /* unbind */ iser_conn->iscsi_conn = NULL; conn->dd_data = NULL; + mutex_unlock(&unbind_iser_conn_mutex); complete(&iser_conn->stop_completion); mutex_unlock(&iser_conn->state_mutex); @@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev) struct iser_conn *iser_conn; struct ib_device *ib_dev; + mutex_lock(&unbind_iser_conn_mutex); + session = starget_to_session(scsi_target(sdev))->dd_data; iser_conn = session->leadconn->dd_data; + if (!iser_conn) { + mutex_unlock(&unbind_iser_conn_mutex); + return -ENOTCONN; + } ib_dev = iser_conn->ib_conn.device->ib_device; if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); + mutex_unlock(&unbind_iser_conn_mutex); + return 0; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index fcbed35e95a8..0e662656ef42 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1452,7 +1452,7 @@ static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct isert_conn *isert_conn = wc->qp->qp_context; - struct ib_device *ib_dev = isert_conn->cm_id->device; + struct ib_device *ib_dev = isert_conn->device->ib_device; if (unlikely(wc->status != IB_WC_SUCCESS)) { isert_print_wc(wc, "login recv"); diff --git a/drivers/input/mouse/alps.c 
b/drivers/input/mouse/alps.c index 262d1057c1da..850b00e3ad8e 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_TWO: if (priv->flags & ALPS_BUTTONPAD) { - f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); + } f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0); - f->mt[1].x = SS4_BTL_MF_X_V2(p, 1); f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1); } else { - f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + } f->mt[0].y = SS4_STD_MF_Y_V2(p, 0); - f->mt[1].x = SS4_STD_MF_X_V2(p, 1); f->mt[1].y = SS4_STD_MF_Y_V2(p, 1); } f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0; @@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f, case SS4_PACKET_ID_MULTI: if (priv->flags & ALPS_BUTTONPAD) { - f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1); + } else { + f->mt[2].x = SS4_BTL_MF_X_V2(p, 0); + f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); + } + f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0); - f->mt[3].x = SS4_BTL_MF_X_V2(p, 1); f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1); no_data_x = SS4_MFPACKET_NO_AX_BL; no_data_y = SS4_MFPACKET_NO_AY_BL; } else { - f->mt[2].x = SS4_STD_MF_X_V2(p, 0); + if (IS_SS4PLUS_DEV(priv->dev_id)) { + f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1); + } else { + f->mt[0].x = SS4_STD_MF_X_V2(p, 0); + f->mt[1].x = SS4_STD_MF_X_V2(p, 1); + } f->mt[2].y = SS4_STD_MF_Y_V2(p, 0); - f->mt[3].x = SS4_STD_MF_X_V2(p, 1); f->mt[3].y = SS4_STD_MF_Y_V2(p, 1); no_data_x = SS4_MFPACKET_NO_AX; no_data_y = SS4_MFPACKET_NO_AY; @@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, memset(otp, 0, sizeof(otp)); - if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || - alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) + if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) || + alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0])) return -1; alps_update_device_area_ss4_v2(otp, priv); diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index ed2d6879fa52..c80a7c76cb76 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -100,6 +100,10 @@ enum SS4_PACKET_ID { ((_b[1 + _i * 3] << 5) & 0x1F00) \ ) +#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \ + ((_b[1 + (_i) * 3] << 4) & 0x0F80) \ + ) + #define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \ ((_b[2 + (_i) * 3] << 5) & 0x01E0) | \ ((_b[2 + (_i) * 3] << 4) & 0x0E00) \ @@ -109,6 +113,10 @@ enum SS4_PACKET_ID { ((_b[0 + (_i) * 3] >> 3) & 0x0010) \ ) +#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \ + ((_b[0 + (_i) * 3] >> 4) & 0x0008) \ + ) + #define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \ ((_b[0 + (_i) * 3] >> 3) & 0x0008) \ ) diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index d5ab9ddef3e3..4f3d3543b2fb 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1223,7 +1223,12 @@ static const struct acpi_device_id 
elan_acpi_id[] = { { "ELAN0000", 0 }, { "ELAN0100", 0 }, { "ELAN0600", 0 }, + { "ELAN0602", 0 }, { "ELAN0605", 0 }, + { "ELAN0608", 0 }, + { "ELAN0605", 0 }, + { "ELAN0609", 0 }, + { "ELAN060B", 0 }, { "ELAN1000", 0 }, { } }; diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 922ea02edcc3..fb3810d35c44 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID))) return -1; - if (param[0] != TP_MAGIC_IDENT) + /* accept the new trackpoint IDs as well; firmware ID may be 0x1, 0x2 or 0x3 */ + if (!(param[0] & TP_MAGIC_IDENT)) return -1; if (firmware_id) diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h index 5617ed3a7d7a..88055755f82e 100644 --- a/drivers/input/mouse/trackpoint.h +++ b/drivers/input/mouse/trackpoint.h @@ -21,8 +21,9 @@ #define TP_COMMAND 0xE2 /* Commands start with this */ #define TP_READ_ID 0xE1 /* Sent for device identification */ -#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */ +#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */ /* by the firmware ID */ + /* Firmware ID includes 0x1, 0x2, 0x3 */ /* diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index c52da651269b..824f4c1c1f31 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -436,8 +436,10 @@ static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = true; - mb(); + spin_unlock_irq(&i8042_lock); + return 0; } @@ -450,16 +452,20 @@ static void i8042_stop(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = false; + port->serio = NULL; + spin_unlock_irq(&i8042_lock); /* + * We need to make sure that interrupt handler finishes using + * our serio port before we return from this function. * We synchronize with both AUX and KBD IRQs because there is * a (very unlikely) chance that AUX IRQ is raised for KBD port * and vice versa.
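 * (Editor's note: port->exists and port->serio are now cleared together
 * under i8042_lock, so i8042_interrupt() samples a consistent pair; the
 * two synchronize_irq() calls below then let any handler that already
 * loaded the old pointers finish before the port goes away.)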
*/ synchronize_irq(I8042_AUX_IRQ); synchronize_irq(I8042_KBD_IRQ); - port->serio = NULL; } /* @@ -576,7 +582,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&i8042_lock, flags); - if (likely(port->exists && !filtered)) + if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); out: diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 0f1219fa8561..28fbc81c6e9e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4316,6 +4316,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) /* Setting */ irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); irte->hi.fields.vector = vcpu_pi_info->vector; + irte->lo.fields_vapic.ga_log_intr = 1; irte->lo.fields_vapic.guest_mode = 1; irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 4de8f4160bb8..09f9dd166827 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -571,7 +571,9 @@ struct amd_iommu { static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) { - return container_of(dev, struct amd_iommu, iommu.dev); + struct iommu_device *iommu = dev_to_iommu_device(dev); + + return container_of(iommu, struct amd_iommu, iommu); } #define ACPIHID_UID_LEN 256 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7ec30b08b3bd..7ecd1a0b8419 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -312,6 +312,14 @@ enum arm_smmu_implementation { CAVIUM_SMMUV2, }; +/* Until ACPICA headers cover IORT rev. C */ +#ifndef ACPI_IORT_SMMU_CORELINK_MMU401 +#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4 +#endif +#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX +#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5 +#endif + struct arm_smmu_s2cr { struct iommu_group *group; int count; @@ -2073,6 +2081,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V1; smmu->model = GENERIC_SMMU; break; + case ACPI_IORT_SMMU_CORELINK_MMU401: + smmu->version = ARM_SMMU_V1_64K; + smmu->model = GENERIC_SMMU; + break; case ACPI_IORT_SMMU_V2: smmu->version = ARM_SMMU_V2; smmu->model = GENERIC_SMMU; @@ -2081,6 +2093,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V2; smmu->model = ARM_MMU500; break; + case ACPI_IORT_SMMU_CAVIUM_THUNDERX: + smmu->version = ARM_SMMU_V2; + smmu->model = CAVIUM_SMMUV2; + break; default: ret = -ENODEV; } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index fc2765ccdb57..76791fded8a4 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4749,7 +4749,9 @@ static void intel_disable_iommus(void) static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { - return container_of(dev, struct intel_iommu, iommu.dev); + struct iommu_device *iommu_dev = dev_to_iommu_device(dev); + + return container_of(iommu_dev, struct intel_iommu, iommu); } static ssize_t intel_iommu_show_version(struct device *dev, diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index c58351ed61c1..36d1a7ce7fc4 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu, va_list vargs; int ret; - device_initialize(&iommu->dev); + iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL); + if (!iommu->dev) + return -ENOMEM; - iommu->dev.class = &iommu_class; - iommu->dev.parent = parent; - 
iommu->dev.groups = groups; + device_initialize(iommu->dev); + + iommu->dev->class = &iommu_class; + iommu->dev->parent = parent; + iommu->dev->groups = groups; va_start(vargs, fmt); - ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); + ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs); va_end(vargs); if (ret) goto error; - ret = device_add(&iommu->dev); + ret = device_add(iommu->dev); if (ret) goto error; + dev_set_drvdata(iommu->dev, iommu); + return 0; error: - put_device(&iommu->dev); + put_device(iommu->dev); return ret; } void iommu_device_sysfs_remove(struct iommu_device *iommu) { - device_unregister(&iommu->dev); + dev_set_drvdata(iommu->dev, NULL); + device_unregister(iommu->dev); + iommu->dev = NULL; } /* * IOMMU drivers can indicate a device is managed by a given IOMMU using @@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link) if (!iommu || IS_ERR(iommu)) return -ENODEV; - ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", + ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices", &link->kobj, dev_name(link)); if (ret) return ret; - ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); + ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu"); if (ret) - sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", + sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); return ret; @@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link) return; sysfs_remove_link(&link->kobj, "iommu"); - sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); + sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link)); } diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c index 28b26c80f4cf..056507099725 100644 --- a/drivers/irqchip/irq-atmel-aic-common.c +++ b/drivers/irqchip/irq-atmel-aic-common.c @@ -142,9 +142,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root) struct device_node *np; void __iomem *regs; - np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc"); + np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc"); if (!np) - np = of_find_compatible_node(root, NULL, + np = of_find_compatible_node(NULL, NULL, "atmel,at91sam9x5-rtc"); if (!np) @@ -196,7 +196,6 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches) return; match = of_match_node(matches, root); - of_node_put(root); if (match) { void (*fixup)(struct device_node *) = match->data; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index c132f29322cc..dbffb7ab6203 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -645,6 +645,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, int enabled; u64 val; + if (cpu >= nr_cpu_ids) + return -EINVAL; + if (gic_irq_in_rdist(d)) return -EINVAL; diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c index 9b856e1890d1..e4c43a17b333 100644 --- a/drivers/isdn/i4l/isdn_common.c +++ b/drivers/isdn/i4l/isdn_common.c @@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg) if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; + bname[sizeof(bname)-1] = 0; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index c151c6daa67e..f63a110b7bcb 100644 --- a/drivers/isdn/i4l/isdn_net.c 
+++ b/drivers/isdn/i4l/isdn_net.c @@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm) char newname[10]; if (p) { - /* Slave-Name MUST not be empty */ - if (!strlen(p + 1)) + /* Slave-Name MUST not be empty or overflow 'newname' */ + if (strscpy(newname, p + 1, sizeof(newname)) <= 0) return NULL; - strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 93b181088168..b68e21c25a17 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1587,16 +1587,18 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map if (likely(ic->mode == 'J')) { if (dio->write) { unsigned next_entry, i, pos; - unsigned ws, we; + unsigned ws, we, range_sectors; - dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); + dio->range.n_sectors = min(dio->range.n_sectors, + ic->free_sectors << ic->sb->log2_sectors_per_block); if (unlikely(!dio->range.n_sectors)) goto sleep; - ic->free_sectors -= dio->range.n_sectors; + range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block; + ic->free_sectors -= range_sectors; journal_section = ic->free_section; journal_entry = ic->free_section_entry; - next_entry = ic->free_section_entry + dio->range.n_sectors; + next_entry = ic->free_section_entry + range_sectors; ic->free_section_entry = next_entry % ic->journal_section_entries; ic->free_section += next_entry / ic->journal_section_entries; ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; @@ -3019,6 +3021,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->error = "Block size doesn't match the information in superblock"; goto bad; } + if (!le32_to_cpu(ic->sb->journal_sections)) { + r = -EINVAL; + ti->error = "Corrupted superblock, journal_sections is 0"; + goto bad; + } /* make sure that ti->max_io_len doesn't overflow */ if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index b4b75dad816a..2e10c2f13a34 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -1571,7 +1571,7 @@ static sector_t __rdev_sectors(struct raid_set *rs) return rdev->sectors; } - BUG(); /* Constructor ensures we got some. 
*/ + return 0; } /* Calculate the sectors per device and per array used for @rs */ @@ -2941,7 +2941,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) bool resize; struct raid_type *rt; unsigned int num_raid_params, num_raid_devs; - sector_t calculated_dev_sectors; + sector_t calculated_dev_sectors, rdev_sectors; struct raid_set *rs = NULL; const char *arg; struct rs_layout rs_layout; @@ -3017,7 +3017,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; - resize = calculated_dev_sectors != __rdev_sectors(rs); + rdev_sectors = __rdev_sectors(rs); + if (!rdev_sectors) { + ti->error = "Invalid rdev size"; + r = -EINVAL; + goto bad; + } + + resize = calculated_dev_sectors != rdev_sectors; INIT_WORK(&rs->md.event_work, do_table_event); ti->private = rs; diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index b0536cfd8e17..06a64d5d8c6c 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode) conf->nfaults = n+1; } -static void faulty_make_request(struct mddev *mddev, struct bio *bio) +static bool faulty_make_request(struct mddev *mddev, struct bio *bio) { struct faulty_conf *conf = mddev->private; int failit = 0; @@ -182,7 +182,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio) * just fail immediately */ bio_io_error(bio); - return; + return true; } if (check_sector(conf, bio->bi_iter.bi_sector, @@ -224,6 +224,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio) bio->bi_bdev = conf->rdev->bdev; generic_make_request(bio); + return true; } static void faulty_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/linear.c b/drivers/md/linear.c index df6f2c98eca7..5f1eb9189542 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -245,7 +245,7 @@ static void linear_free(struct mddev *mddev, void *priv) kfree(conf); } -static void linear_make_request(struct mddev *mddev, struct bio *bio) +static bool linear_make_request(struct mddev *mddev, struct bio *bio) { char b[BDEVNAME_SIZE]; struct dev_info *tmp_dev; @@ -254,7 +254,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } tmp_dev = which_dev(mddev, bio_sector); @@ -292,7 +292,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) mddev_check_write_zeroes(mddev, bio); generic_make_request(bio); } - return; + return true; out_of_bounds: pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n", @@ -302,6 +302,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio) (unsigned long long)tmp_dev->rdev->sectors, (unsigned long long)start_sector); bio_io_error(bio); + return true; } static void linear_status (struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/md.c b/drivers/md/md.c index 87edc342ccb3..caca5d689cdc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -277,7 +277,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) bio_endio(bio); return BLK_QC_T_NONE; } - smp_rmb(); /* Ensure implications of 'active' are visible */ +check_suspended: rcu_read_lock(); if (mddev->suspended) { DEFINE_WAIT(__wait); @@ -302,7 +302,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) sectors = bio_sectors(bio); /* bio could be mergeable after passing 
to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; - mddev->pers->make_request(mddev, bio); + if (!mddev->pers->make_request(mddev, bio)) { + atomic_dec(&mddev->active_io); + wake_up(&mddev->sb_wait); + goto check_suspended; + } cpu = part_stat_lock(); part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); @@ -327,6 +331,7 @@ void mddev_suspend(struct mddev *mddev) if (mddev->suspended++) return; synchronize_rcu(); + wake_up(&mddev->sb_wait); wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); mddev->pers->quiesce(mddev, 1); @@ -7950,12 +7955,14 @@ EXPORT_SYMBOL(md_done_sync); * If we need to update some array metadata (e.g. 'active' flag * in superblock) before writing, schedule a superblock update * and wait for it to complete. + * A return value of 'false' means that the write wasn't recorded + * and cannot proceed as the array is being suspended. */ -void md_write_start(struct mddev *mddev, struct bio *bi) +bool md_write_start(struct mddev *mddev, struct bio *bi) { int did_change = 0; if (bio_data_dir(bi) != WRITE) - return; + return true; BUG_ON(mddev->ro == 1); if (mddev->ro == 2) { @@ -7972,7 +7979,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi) if (mddev->safemode == 1) mddev->safemode = 0; /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ - if (mddev->in_sync || !mddev->sync_checkers) { + if (mddev->in_sync || mddev->sync_checkers) { spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; @@ -7987,7 +7994,12 @@ void md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended); + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { + percpu_ref_put(&mddev->writes_pending); + return false; + } + return true; } EXPORT_SYMBOL(md_write_start); @@ -8627,6 +8639,9 @@ void md_check_recovery(struct mddev *mddev) if (mddev_trylock(mddev)) { int spares = 0; + if (!mddev->external && mddev->safemode == 1) + mddev->safemode = 0; + if (mddev->ro) { struct md_rdev *rdev; if (!mddev->external && mddev->in_sync) diff --git a/drivers/md/md.h b/drivers/md/md.h index 0fa1de42c42b..33611a91b1d9 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -510,7 +510,7 @@ struct md_personality int level; struct list_head list; struct module *owner; - void (*make_request)(struct mddev *mddev, struct bio *bio); + bool (*make_request)(struct mddev *mddev, struct bio *bio); int (*run)(struct mddev *mddev); void (*free)(struct mddev *mddev, void *priv); void (*status)(struct seq_file *seq, struct mddev *mddev); @@ -649,7 +649,7 @@ extern void md_wakeup_thread(struct md_thread *thread); extern void md_check_recovery(struct mddev *mddev); extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); -extern void md_write_start(struct mddev *mddev, struct bio *bi); +extern bool md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); @@ -733,7 +733,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio /* for managing resync I/O pages */ struct resync_pages { - unsigned idx; /* for get/put page from the pool */ void *raid_bio; struct page *pages[RESYNC_PAGES]; }; diff --git a/drivers/md/multipath.c
b/drivers/md/multipath.c index e95d521d93e9..c8d985ba008d 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio) rdev_dec_pending(rdev, conf->mddev); } -static void multipath_make_request(struct mddev *mddev, struct bio * bio) +static bool multipath_make_request(struct mddev *mddev, struct bio * bio) { struct mpconf *conf = mddev->private; struct multipath_bh * mp_bh; @@ -114,7 +114,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } mp_bh = mempool_alloc(conf->pool, GFP_NOIO); @@ -126,7 +126,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) if (mp_bh->path < 0) { bio_io_error(bio); mempool_free(mp_bh, conf->pool); - return; + return true; } multipath = conf->multipaths + mp_bh->path; @@ -141,7 +141,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) mddev_check_writesame(mddev, &mp_bh->bio); mddev_check_write_zeroes(mddev, &mp_bh->bio); generic_make_request(&mp_bh->bio); - return; + return true; } static void multipath_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d6c0bc76e837..94d9ae9b0fd0 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -548,7 +548,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) bio_endio(bio); } -static void raid0_make_request(struct mddev *mddev, struct bio *bio) +static bool raid0_make_request(struct mddev *mddev, struct bio *bio) { struct strip_zone *zone; struct md_rdev *tmp_dev; @@ -559,12 +559,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { raid0_handle_discard(mddev, bio); - return; + return true; } bio_sector = bio->bi_iter.bi_sector; @@ -599,6 +599,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) mddev_check_writesame(mddev, bio); mddev_check_write_zeroes(mddev, bio); generic_make_request(bio); + return true; } static void raid0_status(struct seq_file *seq, struct mddev *mddev) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e1a7e3d4c5e4..5de4b3d04eb5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r1_bio; bio->bi_private = rp; } @@ -492,10 +491,6 @@ static void raid1_end_write_request(struct bio *bio) } if (behind) { - /* we release behind master bio when all write are done */ - if (r1_bio->behind_master_bio == bio) - to_put = NULL; - if (test_bit(WriteMostly, &rdev->flags)) atomic_dec(&r1_bio->behind_remaining); @@ -1088,7 +1083,7 @@ static void unfreeze_array(struct r1conf *conf) wake_up(&conf->wait_barrier); } -static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, +static void alloc_behind_master_bio(struct r1bio *r1_bio, struct bio *bio) { int size = bio->bi_iter.bi_size; @@ -1098,11 +1093,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); if (!behind_bio) - goto fail; + return; /* discard op, we don't support writezero/writesame yet */ - if (!bio_has_data(bio)) + if (!bio_has_data(bio)) { + behind_bio->bi_iter.bi_size = size; goto skip_copy; + } while 
(i < vcnt && size) { struct page *page; @@ -1123,14 +1120,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, r1_bio->behind_master_bio = behind_bio; set_bit(R1BIO_BehindIO, &r1_bio->state); - return behind_bio; + return; free_pages: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_iter.bi_size); bio_free_pages(behind_bio); -fail: - return behind_bio; + bio_put(behind_bio); } struct raid1_plug_cb { @@ -1321,7 +1317,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, * Continue immediately if no resync is active currently. */ - md_write_start(mddev, bio); /* wait on superblock update early */ if ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || @@ -1335,7 +1330,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, */ DEFINE_WAIT(w); for (;;) { - flush_signals(current); + sigset_t full, old; prepare_to_wait(&conf->wait_barrier, &w, TASK_INTERRUPTIBLE); if (bio_end_sector(bio) <= mddev->suspend_lo || @@ -1345,7 +1340,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); } finish_wait(&conf->wait_barrier, &w); } @@ -1481,7 +1479,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, (atomic_read(&bitmap->behind_writes) < mddev->bitmap_info.max_write_behind) && !waitqueue_active(&bitmap->behind_wait)) { - mbio = alloc_behind_master_bio(r1_bio, bio); + alloc_behind_master_bio(r1_bio, bio); } bitmap_startwrite(bitmap, r1_bio->sector, @@ -1491,14 +1489,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, first_clone = 0; } - if (!mbio) { - if (r1_bio->behind_master_bio) - mbio = bio_clone_fast(r1_bio->behind_master_bio, - GFP_NOIO, - mddev->bio_set); - else - mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); - } + if (r1_bio->behind_master_bio) + mbio = bio_clone_fast(r1_bio->behind_master_bio, + GFP_NOIO, mddev->bio_set); + else + mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); if (r1_bio->behind_master_bio) { if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) @@ -1550,13 +1545,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, wake_up(&conf->wait_barrier); } -static void raid1_make_request(struct mddev *mddev, struct bio *bio) +static bool raid1_make_request(struct mddev *mddev, struct bio *bio) { sector_t sectors; if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } /* @@ -1571,8 +1566,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio) if (bio_data_dir(bio) == READ) raid1_read_request(mddev, bio, sectors, NULL); - else + else { + if (!md_write_start(mddev,bio)) + return false; raid1_write_request(mddev, bio, sectors); + } + return true; } static void raid1_status(struct seq_file *seq, struct mddev *mddev) @@ -2362,8 +2361,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) wbio = bio_clone_fast(r1_bio->behind_master_bio, GFP_NOIO, mddev->bio_set); - /* We really need a _all clone */ - wbio->bi_iter = (struct bvec_iter){ 0 }; } else { wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, mddev->bio_set); @@ -2615,6 +2612,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int good_sectors = RESYNC_SECTORS; int min_bad = 0; /* number of sectors that are bad in all devices */ int idx = 
sector_to_idx(sector_nr); + int page_idx = 0; if (!conf->r1buf_pool) if (init_resync(conf)) @@ -2842,7 +2840,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, bio = r1_bio->bios[i]; rp = get_resync_pages(bio); if (bio->bi_end_io) { - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big @@ -2854,7 +2852,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, nr_sectors += len>>9; sector_nr += len>>9; sync_blocks -= (len>>9); - } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r1_bio->sectors = nr_sectors; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 797ed60abd5e..bfc6db236348 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) resync_get_all_pages(rp); } - rp->idx = 0; rp->raid_bio = r10_bio; bio->bi_private = rp; if (rbio) { @@ -1303,8 +1302,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, sector_t sectors; int max_sectors; - md_write_start(mddev, bio); - /* * Register the new request and wait if the reconstruction * thread has put up a bar for new requests. @@ -1525,7 +1522,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors) raid10_write_request(mddev, bio, r10_bio); } -static void raid10_make_request(struct mddev *mddev, struct bio *bio) +static bool raid10_make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); @@ -1534,9 +1531,12 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { md_flush_request(mddev, bio); - return; + return true; } + if (!md_write_start(mddev, bio)) + return false; + /* * If this request crosses a chunk boundary, we need to split * it. 
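The raid1/raid10 hunks above, together with the earlier md.c and md.h changes, convert the personality make_request() hook from void to bool: a personality now calls md_write_start() itself and returns false when the write cannot be recorded because the array is suspending, letting md_make_request() drop its active_io reference and retry. Below is a minimal user-space sketch of that retry contract; the struct fields, busy-wait, and function names are stand-ins chosen for illustration, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct mddev; real locking and waitqueues are not modeled. */
struct mddev {
	int suspended;	/* nonzero while the array is being suspended */
	int active_io;	/* requests currently accepted and in flight */
};

/* Mirrors the new contract: return false when the write cannot be
 * recorded because the array is suspending. */
static bool personality_make_request(struct mddev *mddev, bool is_write)
{
	if (is_write && mddev->suspended)
		return false;	/* md_write_start() would refuse here */
	/* ...hand the request to the lower layers... */
	return true;
}

static void md_make_request(struct mddev *mddev, bool is_write)
{
	for (;;) {
		while (mddev->suspended)
			;	/* the kernel sleeps on a waitqueue instead */
		mddev->active_io++;
		if (personality_make_request(mddev, is_write))
			break;	/* accepted; active_io is dropped when the I/O completes */
		/* refused: give back the reference and re-check suspension */
		mddev->active_io--;
	}
}

int main(void)
{
	struct mddev m = { .suspended = 0, .active_io = 0 };

	md_make_request(&m, true);
	printf("in-flight writes: %d\n", m.active_io);
	return 0;
}

The ordering is the point of the fix: a refused writer drops active_io before it waits, so mddev_suspend()'s wait for active_io to reach zero can always complete instead of deadlocking against a blocked writer.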
@@ -1553,6 +1553,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); + return true; } static void raid10_status(struct seq_file *seq, struct mddev *mddev) @@ -2851,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sectors_skipped = 0; int chunks_skipped = 0; sector_t chunk_mask = conf->geo.chunk_mask; + int page_idx = 0; if (!conf->r10buf_pool) if (init_resync(conf)) @@ -3353,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, break; for (bio= biolist ; bio ; bio=bio->bi_next) { struct resync_pages *rp = get_resync_pages(bio); - page = resync_fetch_page(rp, rp->idx++); + page = resync_fetch_page(rp, page_idx); /* * won't fail because the vec table is big enough * to hold all these pages @@ -3362,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, } nr_sectors += len>>9; sector_nr += len>>9; - } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); + } while (++page_idx < RESYNC_PAGES); r10_bio->sectors = nr_sectors; while (biolist) { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ec0f951ae19f..e92dd2dc4b5a 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5479,7 +5479,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); bi->bi_next = NULL; - md_write_start(mddev, bi); stripe_sectors = conf->chunk_sectors * (conf->raid_disks - conf->max_degraded); @@ -5549,11 +5548,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) release_stripe_plug(mddev, sh); } - md_write_end(mddev); bio_endio(bi); } -static void raid5_make_request(struct mddev *mddev, struct bio * bi) +static bool raid5_make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; int dd_idx; @@ -5569,10 +5567,10 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) int ret = r5l_handle_flush_request(conf->log, bi); if (ret == 0) - return; + return true; if (ret == -ENODEV) { md_flush_request(mddev, bi); - return; + return true; } /* ret == -EAGAIN, fallback */ /* @@ -5582,6 +5580,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) do_flush = bi->bi_opf & REQ_PREFLUSH; } + if (!md_write_start(mddev, bi)) + return false; /* * If array is degraded, better not do chunk aligned read because * later we might have to read it again in order to reconstruct @@ -5591,18 +5591,18 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) mddev->reshape_position == MaxSector) { bi = chunk_aligned_read(mddev, bi); if (!bi) - return; + return true; } if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) { make_discard_request(mddev, bi); - return; + md_write_end(mddev); + return true; } logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); last_sector = bio_end_sector(bi); bi->bi_next = NULL; - md_write_start(mddev, bi); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { @@ -5693,12 +5693,15 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) * userspace, we want an interruptible * wait. 
*/ - flush_signals(current); prepare_to_wait(&conf->wait_for_overlap, &w, TASK_INTERRUPTIBLE); if (logical_sector >= mddev->suspend_lo && logical_sector < mddev->suspend_hi) { + sigset_t full, old; + sigfillset(&full); + sigprocmask(SIG_BLOCK, &full, &old); schedule(); + sigprocmask(SIG_SETMASK, &old, NULL); do_prepare = true; } goto retry; @@ -5740,6 +5743,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi) if (rw == WRITE) md_write_end(mddev); bio_endio(bi); + return true; } static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); @@ -6233,6 +6237,8 @@ static void raid5_do_work(struct work_struct *work) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); + + async_tx_issue_pending_all(); blk_finish_plug(&plug); pr_debug("--- raid5worker inactive\n"); @@ -7947,12 +7953,10 @@ static void end_reshape(struct r5conf *conf) { if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { - struct md_rdev *rdev; spin_lock_irq(&conf->device_lock); conf->previous_raid_disks = conf->raid_disks; - rdev_for_each(rdev, conf->mddev) - rdev->data_offset = rdev->new_data_offset; + md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; conf->mddev->reshape_position = MaxSector; diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c index 73cc7a67a8bc..6df21b29ea17 100644 --- a/drivers/media/pci/cx88/cx88-cards.c +++ b/drivers/media/pci/cx88/cx88-cards.c @@ -3681,7 +3681,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) core->nr = nr; sprintf(core->name, "cx88[%d]", core->nr); - core->tvnorm = V4L2_STD_NTSC_M; + /* + * Note: Setting initial standard here would cause first call to + * cx88_set_tvnorm() to return without programming any registers. 
Leave + * it blank at this point; it will get set later in + * cx8800_initdev(). + */ + core->tvnorm = 0; + core->width = 320; core->height = 240; core->field = V4L2_FIELD_INTERLACED; diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index c7d4e87ccb64..3c529dd4f333 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1435,7 +1435,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, /* initial device configuration */ mutex_lock(&core->lock); - cx88_set_tvnorm(core, core->tvnorm); + cx88_set_tvnorm(core, V4L2_STD_NTSC_M); v4l2_ctrl_handler_setup(&core->video_hdl); v4l2_ctrl_handler_setup(&core->audio_hdl); cx88_video_mux(core, 0); diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c index b2ff82fa7116..ecfeac5cdbed 100644 --- a/drivers/media/pci/saa7164/saa7164-bus.c +++ b/drivers/media/pci/saa7164/saa7164-bus.c @@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size); msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command); msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector); + memcpy(msg, &msg_tmp, sizeof(*msg)); /* No need to update the read positions, because this was a peek */ /* If the caller specifically wants to peek, return */ if (peekonly) { - memcpy(msg, &msg_tmp, sizeof(*msg)); goto peekout; } @@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, space_rem = bus->m_dwSizeGetRing - curr_grp; if (space_rem < sizeof(*msg)) { - /* msg wraps around the ring */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem); - memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing, - sizeof(*msg) - space_rem); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) - space_rem, buf_size); } else if (space_rem == sizeof(*msg)) { - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing, buf_size); } else { /* Additional data wraps around the ring */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) { memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), space_rem - sizeof(*msg)); @@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg, } else { /* No wrapping */ - memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg)); if (buf) memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg), buf_size); } - /* Convert from little endian to CPU */ - msg->size = le16_to_cpu((__force __le16)msg->size); - msg->command = le32_to_cpu((__force __le32)msg->command); - msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector); /* Update the read positions, adjusting the ring */ saa7164_writel(bus->m_dwGetReadPos, new_grp); diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index e3fe3e0635aa..1831bf5ccca5 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -1719,27 +1719,9 @@ static long vpfe_param_handler(struct file *file, void *priv, switch (cmd) { case VPFE_CMD_S_CCDC_RAW_PARAMS: + ret = -EINVAL; v4l2_warn(&vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n"); - if (ccdc_dev->hw_ops.set_params) { - ret = ccdc_dev->hw_ops.set_params(param); - if (ret) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Error setting 
parameters in CCDC\n"); - goto unlock_out; - } - ret = vpfe_get_ccdc_image_format(vpfe_dev, - &vpfe_dev->fmt); - if (ret < 0) { - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "Invalid image format at CCDC\n"); - goto unlock_out; - } - } else { - ret = -EINVAL; - v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, - "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); - } + "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n"); break; default: ret = -ENOTTY; diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 52dc7941db65..1da2c94e1dca 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -1099,10 +1099,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, struct s5p_jpeg_ctx *ctx) { int c, components = 0, notfound, n_dht = 0, n_dqt = 0; - unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0, - sof_len = 0; - unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER], - dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; + unsigned int height = 0, width = 0, word, subsampling = 0; + unsigned int sos = 0, sof = 0, sof_len = 0; + unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER]; + unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER]; long length; struct s5p_jpeg_buffer jpeg_buffer; diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 3489010601b5..bd76534a2749 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -1722,7 +1722,7 @@ static void imon_incoming_scancode(struct imon_context *ictx, if (kc == KEY_KEYBOARD && !ictx->release_code) { ictx->last_keycode = kc; if (!nomouse) { - ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1; + ictx->pad_mouse = !ictx->pad_mouse; dev_dbg(dev, "toggling to %s mode\n", ictx->pad_mouse ? "mouse" : "keyboard"); spin_unlock_irqrestore(&ictx->kc_lock, flags); diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c index de85f1d7ce43..c01b655571a2 100644 --- a/drivers/media/rc/ir-lirc-codec.c +++ b/drivers/media/rc/ir-lirc-codec.c @@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd, if (!dev->rx_resolution) return -ENOTTY; - val = dev->rx_resolution; + val = dev->rx_resolution / 1000; break; case LIRC_SET_WIDEBAND_RECEIVER: diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c index c8863f36686a..f39cf8cb639f 100644 --- a/drivers/media/rc/ir-spi.c +++ b/drivers/media/rc/ir-spi.c @@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev, /* convert the pulse/space signal to raw binary signal */ for (i = 0; i < count; i++) { + unsigned int periods; int j; u16 val = ((i + 1) % 2) ? idata->pulse : idata->space; - if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE) + periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000); + + if (len + periods >= IR_SPI_MAX_BUFSIZE) return -EINVAL; /* @@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev, * contain a space duration. */ val = (i % 2) ? 
idata->space : idata->pulse; - for (j = 0; j < buffer[i]; j++) + for (j = 0; j < periods; j++) idata->tx_buf[len++] = val; } memset(&xfer, 0, sizeof(xfer)); - xfer.speed_hz = idata->freq; + xfer.speed_hz = idata->freq * 16; xfer.len = len * sizeof(*idata->tx_buf); xfer.tx_buf = idata->tx_buf; diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 6ec73357fa47..802e559cc30e 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -1703,6 +1703,16 @@ static int rc_setup_rx_device(struct rc_dev *dev) if (dev->close) dev->input_dev->close = ir_close; + dev->input_dev->dev.parent = &dev->dev; + memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id)); + dev->input_dev->phys = dev->input_phys; + dev->input_dev->name = dev->input_name; + + /* rc_open will be called here */ + rc = input_register_device(dev->input_dev); + if (rc) + goto out_table; + /* * Default delay of 250ms is too short for some protocols, especially * since the timeout is currently set to 250ms. Increase it to 500ms, @@ -1718,16 +1728,6 @@ static int rc_setup_rx_device(struct rc_dev *dev) */ dev->input_dev->rep[REP_PERIOD] = 125; - dev->input_dev->dev.parent = &dev->dev; - memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id)); - dev->input_dev->phys = dev->input_phys; - dev->input_dev->name = dev->input_name; - - /* rc_open will be called here */ - rc = input_register_device(dev->input_dev); - if (rc) - goto out_table; - return 0; out_table: diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c index ffb49c28b15a..0eb33e043079 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c @@ -316,7 +316,7 @@ static int mxl111sf_i2c_sw_xfer_msg(struct mxl111sf_state *state, static int mxl111sf_i2c_send_data(struct mxl111sf_state *state, u8 index, u8 *wdata) { - int ret = mxl111sf_ctrl_msg(state->d, wdata[0], + int ret = mxl111sf_ctrl_msg(state, wdata[0], &wdata[1], 25, NULL, 0); mxl_fail(ret); @@ -326,7 +326,7 @@ static int mxl111sf_i2c_send_data(struct mxl111sf_state *state, static int mxl111sf_i2c_get_data(struct mxl111sf_state *state, u8 index, u8 *wdata, u8 *rdata) { - int ret = mxl111sf_ctrl_msg(state->d, wdata[0], + int ret = mxl111sf_ctrl_msg(state, wdata[0], &wdata[1], 25, rdata, 24); mxl_fail(ret); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index abf69d8fa469..b0d5904a4ea6 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -24,9 +24,6 @@ #include "lgdt3305.h" #include "lg2160.h" -/* Max transfer size done by I2C transfer functions */ -#define MAX_XFER_SIZE 64 - int dvb_usb_mxl111sf_debug; module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able))."); @@ -55,27 +52,34 @@ MODULE_PARM_DESC(rfswitch, "force rf switch position (0=auto, 1=ext, 2=int)."); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); -int mxl111sf_ctrl_msg(struct dvb_usb_device *d, +int mxl111sf_ctrl_msg(struct mxl111sf_state *state, u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { + struct dvb_usb_device *d = state->d; int wo = (rbuf == NULL || rlen == 0); /* write-only */ int ret; - u8 sndbuf[MAX_XFER_SIZE]; - if (1 + wlen > sizeof(sndbuf)) { + if (1 + wlen > MXL_MAX_XFER_SIZE) { pr_warn("%s: len=%d is too big!\n", __func__, wlen); return -EOPNOTSUPP; } pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, 
rlen); - memset(sndbuf, 0, 1+wlen); + mutex_lock(&state->msg_lock); + memset(state->sndbuf, 0, 1+wlen); + memset(state->rcvbuf, 0, rlen); + + state->sndbuf[0] = cmd; + memcpy(&state->sndbuf[1], wbuf, wlen); - sndbuf[0] = cmd; - memcpy(&sndbuf[1], wbuf, wlen); + ret = (wo) ? dvb_usbv2_generic_write(d, state->sndbuf, 1+wlen) : + dvb_usbv2_generic_rw(d, state->sndbuf, 1+wlen, state->rcvbuf, + rlen); + + memcpy(rbuf, state->rcvbuf, rlen); + mutex_unlock(&state->msg_lock); - ret = (wo) ? dvb_usbv2_generic_write(d, sndbuf, 1+wlen) : - dvb_usbv2_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen); mxl_fail(ret); return ret; @@ -91,7 +95,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data) u8 buf[2]; int ret; - ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_READ, &addr, 1, buf, 2); + ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_READ, &addr, 1, buf, 2); if (mxl_fail(ret)) { mxl_debug("error reading reg: 0x%02x", addr); goto fail; @@ -117,7 +121,7 @@ int mxl111sf_write_reg(struct mxl111sf_state *state, u8 addr, u8 data) pr_debug("W: (0x%02x, 0x%02x)\n", addr, data); - ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_WRITE, buf, 2, NULL, 0); + ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_WRITE, buf, 2, NULL, 0); if (mxl_fail(ret)) pr_err("error writing reg: 0x%02x, val: 0x%02x", addr, data); return ret; @@ -926,6 +930,8 @@ static int mxl111sf_init(struct dvb_usb_device *d) .len = sizeof(eeprom), .buf = eeprom }, }; + mutex_init(&state->msg_lock); + ret = get_chip_info(state); if (mxl_fail(ret)) pr_err("failed to get chip info during probe"); diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h index 846260e0eec0..3e6f5880bd1e 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h @@ -19,6 +19,9 @@ #include <media/tveeprom.h> #include <media/media-entity.h> +/* Max transfer size done by I2C transfer functions */ +#define MXL_MAX_XFER_SIZE 64 + #define MXL_EP1_REG_READ 1 #define MXL_EP2_REG_WRITE 2 #define MXL_EP3_INTERRUPT 3 @@ -86,6 +89,9 @@ struct mxl111sf_state { struct mutex fe_lock; u8 num_frontends; struct mxl111sf_adap_state adap_state[3]; + u8 sndbuf[MXL_MAX_XFER_SIZE]; + u8 rcvbuf[MXL_MAX_XFER_SIZE]; + struct mutex msg_lock; #ifdef CONFIG_MEDIA_CONTROLLER_DVB struct media_entity tuner; struct media_pad tuner_pads[2]; @@ -108,7 +114,7 @@ int mxl111sf_ctrl_program_regs(struct mxl111sf_state *state, /* needed for hardware i2c functions in mxl111sf-i2c.c: * mxl111sf_i2c_send_data / mxl111sf_i2c_get_data */ -int mxl111sf_ctrl_msg(struct dvb_usb_device *d, +int mxl111sf_ctrl_msg(struct mxl111sf_state *state, u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen); #define mxl_printk(kern, fmt, arg...) 
\ diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c index 1dfc2de1fe77..4767f4341ba9 100644 --- a/drivers/media/usb/pulse8-cec/pulse8-cec.c +++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c @@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver"); MODULE_LICENSE("GPL"); static int debug; -static int persistent_config = 1; +static int persistent_config; module_param(debug, int, 0644); module_param(persistent_config, int, 0644); MODULE_PARM_DESC(debug, "debug level (0-1)"); diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index d3fe3ea902d4..eb29113e0bac 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component, struct device *dev) { struct enclosure_component *cdev; + int err; if (!edev || component >= edev->components) return -EINVAL; @@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component, if (cdev->dev == dev) return -EEXIST; - if (cdev->dev) + if (cdev->dev) { enclosure_remove_links(cdev); - - put_device(cdev->dev); + put_device(cdev->dev); + } cdev->dev = get_device(dev); - return enclosure_add_links(cdev); + err = enclosure_add_links(cdev); + if (err) { + put_device(cdev->dev); + cdev->dev = NULL; + } + return err; } EXPORT_SYMBOL_GPL(enclosure_add_device); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 3f8c85d5aa09..88fa03142e92 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -176,19 +176,17 @@ static void mmc_retune_timer(unsigned long data) */ int mmc_of_parse(struct mmc_host *host) { - struct device_node *np; + struct device *dev = host->parent; u32 bus_width; int ret; bool cd_cap_invert, cd_gpio_invert = false; bool ro_cap_invert, ro_gpio_invert = false; - if (!host->parent || !host->parent->of_node) + if (!dev || !dev_fwnode(dev)) return 0; - np = host->parent->of_node; - /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ - if (of_property_read_u32(np, "bus-width", &bus_width) < 0) { + if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) { dev_dbg(host->parent, "\"bus-width\" property is missing, assuming 1 bit.\n"); bus_width = 1; @@ -210,7 +208,7 @@ int mmc_of_parse(struct mmc_host *host) } /* f_max is obtained from the optional "max-frequency" property */ - of_property_read_u32(np, "max-frequency", &host->f_max); + device_property_read_u32(dev, "max-frequency", &host->f_max); /* * Configure CD and WP pins. 
They are both by default active low to @@ -225,12 +223,12 @@ int mmc_of_parse(struct mmc_host *host) */ /* Parse Card Detection */ - if (of_property_read_bool(np, "non-removable")) { + if (device_property_read_bool(dev, "non-removable")) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - cd_cap_invert = of_property_read_bool(np, "cd-inverted"); + cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); - if (of_property_read_bool(np, "broken-cd")) + if (device_property_read_bool(dev, "broken-cd")) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, true, @@ -256,7 +254,7 @@ int mmc_of_parse(struct mmc_host *host) } /* Parse Write Protection */ - ro_cap_invert = of_property_read_bool(np, "wp-inverted"); + ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (!ret) @@ -264,64 +262,64 @@ int mmc_of_parse(struct mmc_host *host) else if (ret != -ENOENT && ret != -ENOSYS) return ret; - if (of_property_read_bool(np, "disable-wp")) + if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; /* See the comment on CD inversion above */ if (ro_cap_invert ^ ro_gpio_invert) host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (of_property_read_bool(np, "cap-sd-highspeed")) + if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; - if (of_property_read_bool(np, "cap-mmc-highspeed")) + if (device_property_read_bool(dev, "cap-mmc-highspeed")) host->caps |= MMC_CAP_MMC_HIGHSPEED; - if (of_property_read_bool(np, "sd-uhs-sdr12")) + if (device_property_read_bool(dev, "sd-uhs-sdr12")) host->caps |= MMC_CAP_UHS_SDR12; - if (of_property_read_bool(np, "sd-uhs-sdr25")) + if (device_property_read_bool(dev, "sd-uhs-sdr25")) host->caps |= MMC_CAP_UHS_SDR25; - if (of_property_read_bool(np, "sd-uhs-sdr50")) + if (device_property_read_bool(dev, "sd-uhs-sdr50")) host->caps |= MMC_CAP_UHS_SDR50; - if (of_property_read_bool(np, "sd-uhs-sdr104")) + if (device_property_read_bool(dev, "sd-uhs-sdr104")) host->caps |= MMC_CAP_UHS_SDR104; - if (of_property_read_bool(np, "sd-uhs-ddr50")) + if (device_property_read_bool(dev, "sd-uhs-ddr50")) host->caps |= MMC_CAP_UHS_DDR50; - if (of_property_read_bool(np, "cap-power-off-card")) + if (device_property_read_bool(dev, "cap-power-off-card")) host->caps |= MMC_CAP_POWER_OFF_CARD; - if (of_property_read_bool(np, "cap-mmc-hw-reset")) + if (device_property_read_bool(dev, "cap-mmc-hw-reset")) host->caps |= MMC_CAP_HW_RESET; - if (of_property_read_bool(np, "cap-sdio-irq")) + if (device_property_read_bool(dev, "cap-sdio-irq")) host->caps |= MMC_CAP_SDIO_IRQ; - if (of_property_read_bool(np, "full-pwr-cycle")) + if (device_property_read_bool(dev, "full-pwr-cycle")) host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; - if (of_property_read_bool(np, "keep-power-in-suspend")) + if (device_property_read_bool(dev, "keep-power-in-suspend")) host->pm_caps |= MMC_PM_KEEP_POWER; - if (of_property_read_bool(np, "wakeup-source") || - of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */ + if (device_property_read_bool(dev, "wakeup-source") || + device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */ host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; - if (of_property_read_bool(np, "mmc-ddr-3_3v")) + if (device_property_read_bool(dev, "mmc-ddr-3_3v")) host->caps |= MMC_CAP_3_3V_DDR; - if (of_property_read_bool(np, "mmc-ddr-1_8v")) + if (device_property_read_bool(dev, "mmc-ddr-1_8v")) host->caps |= MMC_CAP_1_8V_DDR; - if 
(of_property_read_bool(np, "mmc-ddr-1_2v")) + if (device_property_read_bool(dev, "mmc-ddr-1_2v")) host->caps |= MMC_CAP_1_2V_DDR; - if (of_property_read_bool(np, "mmc-hs200-1_8v")) + if (device_property_read_bool(dev, "mmc-hs200-1_8v")) host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; - if (of_property_read_bool(np, "mmc-hs200-1_2v")) + if (device_property_read_bool(dev, "mmc-hs200-1_2v")) host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; - if (of_property_read_bool(np, "mmc-hs400-1_8v")) + if (device_property_read_bool(dev, "mmc-hs400-1_8v")) host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; - if (of_property_read_bool(np, "mmc-hs400-1_2v")) + if (device_property_read_bool(dev, "mmc-hs400-1_2v")) host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; - if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe")) + if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe")) host->caps2 |= MMC_CAP2_HS400_ES; - if (of_property_read_bool(np, "no-sdio")) + if (device_property_read_bool(dev, "no-sdio")) host->caps2 |= MMC_CAP2_NO_SDIO; - if (of_property_read_bool(np, "no-sd")) + if (device_property_read_bool(dev, "no-sd")) host->caps2 |= MMC_CAP2_NO_SD; - if (of_property_read_bool(np, "no-mmc")) + if (device_property_read_bool(dev, "no-mmc")) host->caps2 |= MMC_CAP2_NO_MMC; - host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr); + host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr); if (host->dsr_req && (host->dsr & ~0xffff)) { dev_err(host->parent, "device tree specified broken value for DSR: 0x%x, ignoring\n", diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2c87dede5841..f451094acb8d 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1288,7 +1288,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card) static int mmc_select_hs400es(struct mmc_card *card) { struct mmc_host *host = card->host; - int err = 0; + int err = -EINVAL; u8 val; if (!(host->caps & MMC_CAP_8_BIT_DATA)) { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index e45129f48174..efde0f20dd24 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2707,8 +2707,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) host->slot[id] = slot; mmc->ops = &dw_mci_ops; - if (of_property_read_u32_array(host->dev->of_node, - "clock-freq-min-max", freq, 2)) { + if (device_property_read_u32_array(host->dev, "clock-freq-min-max", + freq, 2)) { mmc->f_min = DW_MCI_FREQ_MIN; mmc->f_max = DW_MCI_FREQ_MAX; } else { @@ -2808,7 +2808,6 @@ static void dw_mci_init_dma(struct dw_mci *host) { int addr_config; struct device *dev = host->dev; - struct device_node *np = dev->of_node; /* * Check tansfer mode from HCON[17:16] @@ -2869,8 +2868,9 @@ static void dw_mci_init_dma(struct dw_mci *host) dev_info(host->dev, "Using internal DMA controller.\n"); } else { /* TRANS_MODE_EDMAC: check dma bindings again */ - if ((of_property_count_strings(np, "dma-names") < 0) || - (!of_find_property(np, "dmas", NULL))) { + if ((device_property_read_string_array(dev, "dma-names", + NULL, 0) < 0) || + !device_property_present(dev, "dmas")) { goto no_dma; } host->dma_ops = &dw_mci_edmac_ops; @@ -2937,7 +2937,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) { struct dw_mci_board *pdata; struct device *dev = host->dev; - struct device_node *np = dev->of_node; const struct dw_mci_drv_data *drv_data = host->drv_data; int ret; u32 clock_frequency; @@ -2954,20 +2953,21 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) } /* find out 
number of slots supported */ - of_property_read_u32(np, "num-slots", &pdata->num_slots); + device_property_read_u32(dev, "num-slots", &pdata->num_slots); - if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) + if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth)) dev_info(dev, "fifo-depth property not found, using value of FIFOTH register as default\n"); - of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); + device_property_read_u32(dev, "card-detect-delay", + &pdata->detect_delay_ms); - of_property_read_u32(np, "data-addr", &host->data_addr_override); + device_property_read_u32(dev, "data-addr", &host->data_addr_override); - if (of_get_property(np, "fifo-watermark-aligned", NULL)) + if (device_property_present(dev, "fifo-watermark-aligned")) host->wm_aligned = true; - if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) + if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency)) pdata->bus_hz = clock_frequency; if (drv_data && drv_data->parse_dt) { diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7611fd679f1a..1485530c3592 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -31,6 +31,7 @@ #define SDMMC_MC1R 0x204 #define SDMMC_MC1R_DDR BIT(3) +#define SDMMC_MC1R_FCD BIT(7) #define SDMMC_CACR 0x230 #define SDMMC_CACR_CAPWREN BIT(0) #define SDMMC_CACR_KEY (0x46 << 8) @@ -43,6 +44,15 @@ struct sdhci_at91_priv { struct clk *mainck; }; +static void sdhci_at91_set_force_card_detect(struct sdhci_host *host) +{ + u8 mc1r; + + mc1r = readb(host->ioaddr + SDMMC_MC1R); + mc1r |= SDMMC_MC1R_FCD; + writeb(mc1r, host->ioaddr + SDMMC_MC1R); +} + static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) { u16 clk; @@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) sdhci_set_uhs_signaling(host, timing); } +static void sdhci_at91_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); +} + static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_clock = sdhci_at91_set_clock, .set_bus_width = sdhci_set_bus_width, - .reset = sdhci_reset, + .reset = sdhci_at91_reset, .set_uhs_signaling = sdhci_at91_set_uhs_signaling, .set_power = sdhci_at91_set_power, }; @@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } + /* + * If the device attached to the MMC bus is not removable, it is safer + * to set the Force Card Detect bit. People often don't connect the + * card detect signal and use this pin for another purpose. If the card + * detect pin is not muxed to SDHCI controller, a default value is + * used. This value can be different from a SoC revision to another + * one. Problems come when this default value is not card present. To + * avoid this case, if the device is non removable then the card + * detection procedure using the SDMCC_CD signal is bypassed. + * This bit is reset when a software reset for all command is performed + * so we need to implement our own reset function to set back this bit. 
+ */ + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + sdhci_at91_set_force_card_detect(host); + pm_runtime_put_autosuspend(&pdev->dev); return 0; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d6fa2214aaae..0fb4e4c119e1 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, } mmc_writel(host, REG_CLKCR, rval); - if (host->cfg->needs_new_timings) - mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE); + if (host->cfg->needs_new_timings) { + /* Don't touch the delay bits */ + rval = mmc_readl(host, REG_SD_NTSR); + rval |= SDXC_2X_TIMING_MODE; + mmc_writel(host, REG_SD_NTSR, rval); + } ret = sunxi_mmc_clk_set_phase(host, ios, rate); if (ret) diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index a2d92f10501b..a3d20e39e5b5 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -404,30 +404,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host, * Transfer the data */ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) { - u8 data[4] = { }; + u32 data = 0; + u32 *buf32 = (u32 *)buf; if (is_read) - sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf, + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32, count >> 2); else - sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf, + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32, count >> 2); /* if count was multiple of 4 */ if (!(count & 0x3)) return; - buf8 = (u8 *)(buf + (count >> 2)); + buf32 += count >> 2; count %= 4; if (is_read) { - sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, - (u32 *)data, 1); - memcpy(buf8, data, count); + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1); + memcpy(buf32, &data, count); } else { - memcpy(data, buf8, count); - sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, - (u32 *)data, 1); + memcpy(&data, buf32, count); + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1); } return; diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 55a8ee5306ea..8c210a5776bc 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) */ struct platform_device *pdev = to_platform_device(userdev); const struct atmel_pmecc_caps *caps; + const struct of_device_id *match; /* No PMECC engine available. */ if (!of_property_read_bool(userdev->of_node, @@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev) caps = &at91sam9g45_caps; - /* - * Try to find the NFC subnode and extract the associated caps - * from there. - */ - np = of_find_compatible_node(userdev->of_node, NULL, - "atmel,sama5d3-nfc"); - if (np) { - const struct of_device_id *match; - - match = of_match_node(atmel_pmecc_legacy_match, np); - if (match && match->data) - caps = match->data; - - of_node_put(np); - } + /* Find the caps associated to the NAND dev node. 
*/ + match = of_match_node(atmel_pmecc_legacy_match, + userdev->of_node); + if (match && match->data) + caps = match->data; pmecc = atmel_pmecc_create(pdev, caps, 1, 2); } diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index b1dd12729f19..6f9771e82476 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section, if (!section) { oobregion->offset = 0; - oobregion->length = 4; + if (mtd->oobsize == 16) + oobregion->length = 4; + else + oobregion->length = 3; } else { + if (mtd->oobsize == 8) + return -ERANGE; + oobregion->offset = 6; oobregion->length = ecc->total - 4; } @@ -1102,7 +1108,9 @@ static int nand_setup_data_interface(struct nand_chip *chip) * Ensure the timing mode has been changed on the chip side * before changing timings on the controller side. */ - if (chip->onfi_version) { + if (chip->onfi_version && + (le16_to_cpu(chip->onfi_params.opt_cmd) & + ONFI_OPT_CMD_SET_GET_FEATURES)) { u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { chip->onfi_timing_mode_default, }; diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c index f06312df3669..7e36d7d13c26 100644 --- a/drivers/mtd/nand/nand_timings.c +++ b/drivers/mtd/nand/nand_timings.c @@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip, struct nand_sdr_timings *timings = &iface->timings.sdr; /* microseconds -> picoseconds */ - timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); - timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); - timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); + timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog); + timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers); + timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r); /* nanoseconds -> picoseconds */ timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 49b286c6c10f..022e3d494741 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -340,7 +340,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos) if (!*buf) { /* skip over "len" bytes */ - chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1); + chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1); } else { tango_write_buf(mtd, *buf, len); *buf += len; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8ab6bdbe1682..510a580e0348 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->delay = 0; new_slave->link_failure_count = 0; - if (bond_update_speed_duplex(new_slave)) + if (bond_update_speed_duplex(new_slave) && + bond_needs_speed_duplex(bond)) new_slave->link = BOND_LINK_DOWN; new_slave->last_rx = jiffies - @@ -2047,6 +2048,7 @@ static int bond_miimon_inspect(struct bonding *bond) continue; bond_propose_link_state(slave, BOND_LINK_FAIL); + commit++; slave->delay = bond->params.downdelay; if (slave->delay) { netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", @@ -2085,6 +2087,7 @@ static int bond_miimon_inspect(struct bonding *bond) continue; bond_propose_link_state(slave, BOND_LINK_BACK); + commit++; slave->delay = bond->params.updelay; if (slave->delay) { @@ -2135,11 +2138,13 @@ static void bond_miimon_commit(struct bonding *bond) continue; case BOND_LINK_UP: 
- if (bond_update_speed_duplex(slave)) { + if (bond_update_speed_duplex(slave) && + bond_needs_speed_duplex(bond)) { slave->link = BOND_LINK_DOWN; - netdev_warn(bond->dev, - "failed to get link speed/duplex for %s\n", - slave->dev->name); + if (net_ratelimit()) + netdev_warn(bond->dev, + "failed to get link speed/duplex for %s\n", + slave->dev->name); continue; } bond_set_slave_link_state(slave, BOND_LINK_UP, @@ -4598,7 +4603,7 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; - if (bond_mode == BOND_MODE_TLB) { + if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { bond_opt_initstr(&newval, "default"); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index fa0eece21eef..d9cc94a7d44e 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1668,6 +1668,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .dev_name = "BCM53125", .vlans = 4096, .enabled_ports = 0xff, + .arl_entries = 4, .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d034d8cd7d22..32864a47c4c1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3377,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_jumbo_config = mv88e6165_port_jumbo_config, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_config = mv88e6390_port_pause_config, + .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 5711fbbd6ae3..878cffd37e1f 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev) mac_mode |= HALF_DUPLEX; if (gigabit) { - if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (phy_interface_is_rgmii(dev->phydev)) mac_mode |= RGMII_MODE; mac_mode |= GMAC_MODE; @@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev) break; case PHY_INTERFACE_MODE_RGMII: - pad_mode = PAD_MODE_RGMII; - break; - + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; + pad_mode = PAD_MODE_RGMII; break; default: diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 962dcbcef8b5..0dcda0b9b0cc 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -221,7 +221,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct) /* Wait for 100ms as Octeon resets. 
*/ mdelay(100); - if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1) == 0x1234ULL) { + if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) { dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n", oct->octeon_id); return 1; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index bdec051107a6..d62a5096768e 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -44,7 +44,7 @@ int lio_cn6xxx_soft_reset(struct octeon_device *oct) /* Wait for 10ms as Octeon resets. */ mdelay(100); - if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) { + if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) { dev_err(&oct->pci_dev->dev, "Soft reset failed\n"); return 1; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 53309f659951..0ac1a5500fc3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2055,12 +2055,12 @@ static void detach_ulds(struct adapter *adap) mutex_lock(&uld_mutex); list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) - if (adap->uld && adap->uld[i].handle) { + if (adap->uld && adap->uld[i].handle) adap->uld[i].state_change(adap->uld[i].handle, CXGB4_STATE_DETACH); - adap->uld[i].handle = NULL; - } + if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -5086,8 +5086,10 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) + if (is_uld(adapter)) { detach_ulds(adapter); + t4_uld_clean_up(adapter); + } disable_interrupts(adapter); @@ -5164,7 +5166,11 @@ static void shutdown_one(struct pci_dev *pdev) if (adapter->port[i]->reg_state == NETREG_REGISTERED) cxgb_close(adapter->port[i]); - t4_uld_clean_up(adapter); + if (is_uld(adapter)) { + detach_ulds(adapter); + t4_uld_clean_up(adapter); + } + disable_interrupts(adapter); disable_msi(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index d0868c2320da..dbbc2b7f0d46 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap) kfree(adap->uld); } +/* This function should be called with uld_mutex taken. 
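The comment above states the helper's locking contract but nothing enforces it; in-tree, the idiomatic guard for such a contract is lockdep_assert_held(&uld_mutex) under CONFIG_PROVE_LOCKING. A runnable userspace sketch of the same idea, with a demo flag standing in for lockdep (none of the names below are cxgb4 API):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uld_mutex = PTHREAD_MUTEX_INITIALIZER;
static int uld_mutex_held;  /* demo bookkeeping; lockdep does this in-tree */

static void shutdown_uld_adapter_example(void)
{
    /* The documented contract, made checkable at runtime. */
    assert(uld_mutex_held);

    printf("tearing down queues/irqs exactly once\n");
}

static void unregister_path(void)
{
    pthread_mutex_lock(&uld_mutex);
    uld_mutex_held = 1;
    shutdown_uld_adapter_example();
    uld_mutex_held = 0;
    pthread_mutex_unlock(&uld_mutex);
}

int main(void)
{
    unregister_path();
    return 0;
}

Factoring the teardown into one helper called from both the unregister and clean-up paths, as the hunks below do, is what makes a single enforcement point like this possible.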
*/ +static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) +{ + if (adap->uld[type].handle) { + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + release_sge_txq_uld(adap, type); + + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } +} + void t4_uld_clean_up(struct adapter *adap) { unsigned int i; - if (!adap->uld) - return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, i); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, i); - free_sge_queues_uld(adap, i); - free_queues_uld(adap, i); + + cxgb4_shutdown_uld_adapter(adap, i); } + mutex_unlock(&uld_mutex); } static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) @@ -782,15 +797,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) continue; if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) continue; - adap->uld[type].handle = NULL; - adap->uld[type].add = NULL; - release_sge_txq_uld(adap, type); - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, type); - free_sge_queues_uld(adap, type); - free_queues_uld(adap, type); + + cxgb4_shutdown_uld_adapter(adap, type); } mutex_unlock(&uld_mutex); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index ee443985581f..4a50870e0fa7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -257,6 +257,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) } /* Set phy->phy_addr and phy->id. */ + igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0); ret_val = igb_get_phy_id_82575(hw); if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 77abd1813047..802f0e8bff3a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -572,16 +572,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum, * header, the HW adds it. To address that, we are subtracting the pseudo * header checksum from the checksum value provided by the HW. 
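As the comment explains, the hardware's checksum covers the pseudo header as well as the payload, so the driver computes the pseudo-header sum (csum_tcpudp_nofold() in the hunk below) and subtracts it. A worked, compilable example of the IPv4 pseudo-header arithmetic, folded to 16 bits for printing; the addresses and length are made up:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Fold a 32-bit accumulator into a 16-bit ones'-complement sum. */
static uint16_t csum_fold16(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* Sum of the IPv4 pseudo header (saddr, daddr, zero, protocol, L4
 * length): the quantity csum_tcpudp_nofold() produces and the driver
 * subtracts from the hardware-reported checksum. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
                               uint8_t proto, uint16_t l4len)
{
    uint32_t sum = 0;

    sum += saddr >> 16;
    sum += saddr & 0xffff;
    sum += daddr >> 16;
    sum += daddr & 0xffff;
    sum += proto;   /* the zero field is the (zero) high byte here */
    sum += l4len;

    return csum_fold16(sum);
}

int main(void)
{
    /* 192.0.2.1 -> 192.0.2.2, UDP (17), 64 bytes of L4 data */
    uint32_t s = ntohl(inet_addr("192.0.2.1"));
    uint32_t d = ntohl(inet_addr("192.0.2.2"));

    printf("pseudo header sum: 0x%04x\n", pseudo_hdr_sum(s, d, 17, 64));
    return 0;
}

This arithmetic also shows why the hunk bails out for SCTP: SCTP uses a CRC32c checksum, not this ones'-complement scheme, so the subtraction trick cannot produce a usable value for it.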
*/ -static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, - struct iphdr *iph) +static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, + struct iphdr *iph) { __u16 length_for_csum = 0; __wsum csum_pseudo_header = 0; + __u8 ipproto = iph->protocol; + + if (unlikely(ipproto == IPPROTO_SCTP)) + return -1; length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, - length_for_csum, iph->protocol, 0); + length_for_csum, ipproto, 0); skb->csum = csum_sub(hw_checksum, csum_pseudo_header); + return 0; } #if IS_ENABLED(CONFIG_IPV6) @@ -592,17 +597,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, struct ipv6hdr *ipv6h) { + __u8 nexthdr = ipv6h->nexthdr; __wsum csum_pseudo_hdr = 0; - if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || - ipv6h->nexthdr == IPPROTO_HOPOPTS)) + if (unlikely(nexthdr == IPPROTO_FRAGMENT || + nexthdr == IPPROTO_HOPOPTS || + nexthdr == IPPROTO_SCTP)) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); csum_pseudo_hdr = csum_partial(&ipv6h->saddr, sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); + csum_pseudo_hdr = csum_add(csum_pseudo_hdr, + (__force __wsum)htons(nexthdr)); skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); @@ -625,11 +633,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, } if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) - get_fixed_ipv4_csum(hw_checksum, skb, hdr); + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); #if IS_ENABLED(CONFIG_IPV6) - else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) - if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) - return -1; + if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) + return get_fixed_ipv6_csum(hw_checksum, skb, hdr); #endif return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 83aab1e4c8c8..9f214f9fb48c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) /* Virtual PCI function needs to determine UAR page size from * firmware. 
Only master PCI function can set the uar page size */ - if (enable_4k_uar) + if (enable_4k_uar || !dev->persist->num_vfs) dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; else dev->uar_page_shift = PAGE_SHIFT; @@ -2275,7 +2275,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; - if (enable_4k_uar) { + if (enable_4k_uar || !dev->persist->num_vfs) { init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 10d282841f5b..ac0a460c006a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -777,6 +777,10 @@ static void cb_timeout_handler(struct work_struct *work) mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); } +static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); +static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, + struct mlx5_cmd_msg *msg); + static void cmd_work_handler(struct work_struct *work) { struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); @@ -786,16 +790,27 @@ static void cmd_work_handler(struct work_struct *work) struct mlx5_cmd_layout *lay; struct semaphore *sem; unsigned long flags; + int alloc_ret; sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { - ent->idx = alloc_ent(cmd); - if (ent->idx < 0) { + alloc_ret = alloc_ent(cmd); + if (alloc_ret < 0) { mlx5_core_err(dev, "failed to allocate command entry\n"); + if (ent->callback) { + ent->callback(-EAGAIN, ent->context); + mlx5_free_cmd_msg(dev, ent->out); + free_msg(dev, ent->in); + free_cmd(ent); + } else { + ent->ret = -EAGAIN; + complete(&ent->done); + } up(sem); return; } + ent->idx = alloc_ret; } else { ent->idx = cmd->max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); @@ -955,7 +970,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, err = wait_func(dev, ent); if (err == -ETIMEDOUT) - goto out_free; + goto out; ds = ent->ts2 - ent->ts1; op = MLX5_GET(mbox_in, in->first.data, opcode); @@ -1419,6 +1434,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", ent->idx); free_ent(cmd, ent->idx); + free_cmd(ent); } continue; } @@ -1477,7 +1493,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) free_msg(dev, ent->in); err = err ? 
err : ent->status; - free_cmd(ent); + if (!forced) + free_cmd(ent); callback(err, context); } else { complete(&ent->done); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 944fc1742464..3b39dbd97e57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -261,6 +261,14 @@ struct mlx5e_dcbx { }; #endif +#define MAX_PIN_NUM 8 +struct mlx5e_pps { + u8 pin_caps[MAX_PIN_NUM]; + struct work_struct out_work; + u64 start[MAX_PIN_NUM]; + u8 enabled; +}; + struct mlx5e_tstamp { rwlock_t lock; struct cyclecounter cycles; @@ -272,7 +280,7 @@ struct mlx5e_tstamp { struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; - u8 *pps_pin_caps; + struct mlx5e_pps pps_info; }; enum { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index e706a87fc8b2..80c500f87ab6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -53,6 +53,15 @@ enum { MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, }; +enum { + MLX5E_MTPPS_FS_ENABLE = BIT(0x0), + MLX5E_MTPPS_FS_PATTERN = BIT(0x2), + MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), + MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), + MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), + MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), +}; + void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, struct skb_shared_hwtstamps *hwts) { @@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; } +static void mlx5e_pps_out(struct work_struct *work) +{ + struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, + out_work); + struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, + pps_info); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + unsigned long flags; + int i; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + u64 tstart; + + write_lock_irqsave(&tstamp->lock, flags); + tstart = tstamp->pps_info.start[i]; + tstamp->pps_info.start[i] = 0; + write_unlock_irqrestore(&tstamp->lock, flags); + if (!tstart) + continue; + + MLX5_SET(mtpps_reg, in, pin, i); + MLX5_SET64(mtpps_reg, in, time_stamp, tstart); + MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); + mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); + } +} + static void mlx5e_timestamp_overflow(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, overflow_work); + struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); unsigned long flags; write_lock_irqsave(&tstamp->lock, flags); timecounter_read(&tstamp->clock); write_unlock_irqrestore(&tstamp->lock, flags); - schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); + queue_delayed_work(priv->wq, &tstamp->overflow_work, + msecs_to_jiffies(tstamp->overflow_period * 1000)); } int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -214,18 +252,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) int neg_adj = 0; struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); - - if (MLX5_CAP_GEN(priv->mdev, pps_modify)) { - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - - /* For future use need to add a loop for finding all 1PPS out pins */ - 
MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); - MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF); - - mlx5_set_mtpps(priv->mdev, in, sizeof(in)); - } if (delta < 0) { neg_adj = 1; @@ -254,12 +280,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u32 field_select = 0; + u8 pin_mode = 0; u8 pattern = 0; int pin = -1; int err = 0; - if (!MLX5_CAP_GEN(priv->mdev, pps) || - !MLX5_CAP_GEN(priv->mdev, pps_modify)) + if (!MLX5_PPS_CAP(priv->mdev)) return -EOPNOTSUPP; if (rq->extts.index >= tstamp->ptp_info.n_pins) @@ -269,15 +296,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); if (pin < 0) return -EBUSY; + pin_mode = MLX5E_PIN_MODE_IN; + pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); + field_select = MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE; + } else { + pin = rq->extts.index; + field_select = MLX5E_MTPPS_FS_ENABLE; } - if (rq->extts.flags & PTP_FALLING_EDGE) - pattern = 1; - MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); MLX5_SET(mtpps_reg, in, pattern, pattern); MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET(mtpps_reg, in, field_select, field_select); err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); if (err) @@ -296,14 +329,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - u64 nsec_now, nsec_delta, time_stamp; + u64 nsec_now, nsec_delta, time_stamp = 0; u64 cycles_now, cycles_delta; struct timespec64 ts; unsigned long flags; + u32 field_select = 0; + u8 pin_mode = 0; + u8 pattern = 0; int pin = -1; + int err = 0; s64 ns; - if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) + if (!MLX5_PPS_CAP(priv->mdev)) return -EOPNOTSUPP; if (rq->perout.index >= tstamp->ptp_info.n_pins) @@ -314,32 +351,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, rq->perout.index); if (pin < 0) return -EBUSY; - } - ts.tv_sec = rq->perout.period.sec; - ts.tv_nsec = rq->perout.period.nsec; - ns = timespec64_to_ns(&ts); - if (on) + pin_mode = MLX5E_PIN_MODE_OUT; + pattern = MLX5E_OUT_PATTERN_PERIODIC; + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + if ((ns >> 1) != 500000000LL) return -EINVAL; - ts.tv_sec = rq->perout.start.sec; - ts.tv_nsec = rq->perout.start.nsec; - ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - write_unlock_irqrestore(&tstamp->lock, flags); - time_stamp = cycles_now + cycles_delta; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + field_select = 
MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE | + MLX5E_MTPPS_FS_TIME_STAMP; + } else { + pin = rq->perout.index; + field_select = MLX5E_MTPPS_FS_ENABLE; + } + MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); - MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); + MLX5_SET(mtpps_reg, in, pattern, pattern); MLX5_SET(mtpps_reg, in, enable, on); MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + MLX5_SET(mtpps_reg, in, field_select, field_select); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; - return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + + tstamp->pps_info.enabled = !!on; + return 0; } static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, @@ -351,6 +416,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, return mlx5e_extts_configure(ptp, rq, on); case PTP_CLK_REQ_PEROUT: return mlx5e_perout_configure(ptp, rq, on); + case PTP_CLK_REQ_PPS: + return mlx5e_pps_configure(ptp, rq, on); default: return -EOPNOTSUPP; } @@ -396,6 +463,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) return -ENOMEM; tstamp->ptp_info.enable = mlx5e_ptp_enable; tstamp->ptp_info.verify = mlx5e_ptp_verify; + tstamp->ptp_info.pps = 1; for (i = 0; i < tstamp->ptp_info.n_pins; i++) { snprintf(tstamp->ptp_info.pin_config[i].name, @@ -423,22 +491,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, cap_max_num_of_pps_out_pins); - tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); - tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); - tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); - tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); - tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); - tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); - tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); - tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); + tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); } void mlx5e_pps_event_handler(struct mlx5e_priv *priv, struct ptp_clock_event *event) { + struct net_device *netdev = priv->netdev; struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct timespec64 ts; + u64 nsec_now, nsec_delta; + u64 cycles_now, cycles_delta; + int pin = event->index; + s64 ns; + unsigned long flags; - ptp_clock_event(tstamp->ptp, event); + switch (tstamp->ptp_info.pin_config[pin].func) { + case PTP_PF_EXTTS: + if (tstamp->pps_info.enabled) { + event->type = PTP_CLOCK_PPSUSR; + event->pps_times.ts_real = 
ns_to_timespec64(event->timestamp); + } else { + event->type = PTP_CLOCK_EXTTS; + } + ptp_clock_event(tstamp->ptp, event); + break; + case PTP_PF_PEROUT: + mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + ts.tv_sec += 1; + ts.tv_nsec = 0; + ns = timespec64_to_ns(&ts); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + tstamp->pps_info.start[pin] = cycles_now + cycles_delta; + queue_work(priv->wq, &tstamp->pps_info.out_work); + write_unlock_irqrestore(&tstamp->lock, flags); + break; + default: + netdev_err(netdev, "%s: Unhandled event\n", __func__); + } } void mlx5e_timestamp_init(struct mlx5e_priv *priv) @@ -474,9 +576,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) do_div(ns, NSEC_PER_SEC / 2 / HZ); tstamp->overflow_period = ns; + INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); if (tstamp->overflow_period) - schedule_delayed_work(&tstamp->overflow_work, 0); + queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); else mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); @@ -485,16 +588,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); /* Initialize 1PPS data structures */ -#define MAX_PIN_NUM 8 - tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); - if (tstamp->pps_pin_caps) { - if (MLX5_CAP_GEN(priv->mdev, pps)) - mlx5e_get_pps_caps(priv, tstamp); - if (tstamp->ptp_info.n_pins) - mlx5e_init_pin_config(tstamp); - } else { - mlx5_core_warn(priv->mdev, "1PPS initialization failed\n"); - } + if (MLX5_PPS_CAP(priv->mdev)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, &priv->mdev->pdev->dev); @@ -517,8 +614,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) priv->tstamp.ptp = NULL; } - kfree(tstamp->pps_pin_caps); - kfree(tstamp->ptp_info.pin_config); - + cancel_work_sync(&tstamp->pps_info.out_work); cancel_delayed_work_sync(&tstamp->overflow_work); + kfree(tstamp->ptp_info.pin_config); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 8fa23f6a1f67..2eb54d36e16e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -464,6 +464,8 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev, if (!perm_addr) return; + memset(perm_addr, 0xff, MAX_ADDR_LEN); + mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 85bf4a389295..986387de13ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv, static bool outer_header_zero(u32 *match_criteria) { - int size = MLX5_ST_SZ_BYTES(fte_match_param); + int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers); char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers); @@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, spec->match_criteria_enable = 
(!outer_header_zero(spec->match_criteria)); flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0); if (IS_ERR(rule)) { err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 277f4de30375..072aa8a13a0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -365,7 +365,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, break; case MLX5_DEV_EVENT_PPS: eqe = (struct mlx5_eqe *)param; - ptp_event.type = PTP_CLOCK_EXTTS; ptp_event.index = eqe->data.pps.pin; ptp_event.timestamp = timecounter_cyc2time(&priv->tstamp.clock, @@ -3053,8 +3052,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); - stats->tx_carrier_errors = - PPORT_802_3_GET(pstats, a_symbol_error_during_carrier); stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + stats->rx_frame_errors; stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 33eae5ad2fb0..58a9f5c96d10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -690,7 +690,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) else mlx5_core_dbg(dev, "port_module_event is not set\n"); - if (MLX5_CAP_GEN(dev, pps)) + if (MLX5_PPS_CAP(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index f27f84ffbc85..8a8b5f0e497c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -67,6 +67,7 @@ enum { enum { MLX5_DROP_NEW_HEALTH_WORK, + MLX5_DROP_NEW_RECOVERY_WORK, }; static u8 get_nic_state(struct mlx5_core_dev *dev) @@ -193,7 +194,7 @@ static void health_care(struct work_struct *work) mlx5_handle_bad_state(dev); spin_lock(&health->wq_lock); - if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) + if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) schedule_delayed_work(&health->recover_work, recover_delay); else dev_err(&dev->pdev->dev, @@ -313,6 +314,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) init_timer(&health->timer); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; @@ -335,11 +337,22 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev) spin_lock(&health->wq_lock); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); spin_unlock(&health->wq_lock); cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); } +void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + + spin_lock(&health->wq_lock); + 
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); + spin_unlock(&health->wq_lock); + cancel_delayed_work_sync(&dev->priv.health.recover_work); +} + void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c index cc1858752e70..6d90e9e3bfd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c @@ -160,8 +160,6 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { - mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn); - mlx5_core_destroy_qp(mdev, qp); } @@ -176,8 +174,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); - err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); @@ -235,6 +231,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { + struct mlx5i_priv *ipriv = priv->ppriv; int err; err = mlx5e_create_indirect_rqt(priv); @@ -253,12 +250,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_indirect_tirs; - err = mlx5i_create_flow_steering(priv); + err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); if (err) goto err_destroy_direct_tirs; + err = mlx5i_create_flow_steering(priv); + if (err) + goto err_remove_rx_underlay_qpn; + return 0; +err_remove_rx_underlay_qpn: + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: @@ -272,6 +275,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { + struct mlx5i_priv *ipriv = priv->ppriv; + + mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index b5d5519542e8..0ca4623bda6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 *port1, u8 *port2) { - if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { - if (tracker->netdev_state[0].tx_enabled) { - *port1 = 1; - *port2 = 1; - } else { - *port1 = 2; - *port2 = 2; - } - } else { - *port1 = 1; - *port2 = 2; - if (!tracker->netdev_state[0].link_up) - *port1 = 2; - else if (!tracker->netdev_state[1].link_up) - *port2 = 1; + *port1 = 1; + *port2 = 2; + if (!tracker->netdev_state[0].tx_enabled || + !tracker->netdev_state[0].link_up) { + *port1 = 2; + return; } + + if (!tracker->netdev_state[1].tx_enabled || + !tracker->netdev_state[1].link_up) + *port2 = 1; } static void mlx5_activate_lag(struct mlx5_lag *ldev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 13be264587f1..524c16f72e83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1020,7 +1020,7 @@ static int mlx5_load_one(struct mlx5_core_dev 
*dev, struct mlx5_priv *priv, if (err) { dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", FW_PRE_INIT_TIMEOUT_MILI); - goto out; + goto out_err; } err = mlx5_cmd_init(dev); @@ -1228,7 +1228,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, int err = 0; if (cleanup) - mlx5_drain_health_wq(dev); + mlx5_drain_health_recovery(dev); mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index fbc6e9e9e305..1874aa96c1a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -153,6 +153,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); +#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ + MLX5_CAP_GEN((mdev), pps_modify) && \ + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ + MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj)) + void mlx5e_init(void); void mlx5e_cleanup(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 82bd6b0935f1..fd4a785431ac 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -881,8 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; err_unmap: - --f; - while (f >= 0) { + while (--f >= 0) { frag = &skb_shinfo(skb)->frags[f]; dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 78f9e43420e0..f2da073f4b2b 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5034,12 +5034,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *uc; - int addr_count; unsigned int i; - addr_count = netdev_uc_count(net_dev); table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); - table->dev_uc_count = 1 + addr_count; ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; netdev_for_each_uc_addr(uc, net_dev) { @@ -5050,6 +5047,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); i++; } + + table->dev_uc_count = i; } static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) @@ -5057,12 +5056,11 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *mc; - unsigned int i, addr_count; + unsigned int i; table->mc_overflow = false; table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); - addr_count = netdev_mc_count(net_dev); i = 0; netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 199459bd6961..6ec8fc9aad8f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -45,9 +45,17 @@ struct geneve_net { static unsigned int geneve_net_id; +struct geneve_dev_node { + struct hlist_node hlist; + struct 
geneve_dev *geneve; +}; + /* Pseudo network device */ struct geneve_dev { - struct hlist_node hlist; /* vni hash table */ + struct geneve_dev_node hlist4; /* vni hash table for IPv4 socket */ +#if IS_ENABLED(CONFIG_IPV6) + struct geneve_dev_node hlist6; /* vni hash table for IPv6 socket */ +#endif struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ struct ip_tunnel_info info; @@ -123,16 +131,16 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { struct hlist_head *vni_list_head; - struct geneve_dev *geneve; + struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; - hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) { - if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) && - addr == geneve->info.key.u.ipv4.dst) - return geneve; + hlist_for_each_entry_rcu(node, vni_list_head, hlist) { + if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) && + addr == node->geneve->info.key.u.ipv4.dst) + return node->geneve; } return NULL; } @@ -142,16 +150,16 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs, struct in6_addr addr6, u8 vni[]) { struct hlist_head *vni_list_head; - struct geneve_dev *geneve; + struct geneve_dev_node *node; __u32 hash; /* Find the device for this VNI */ hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; - hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) { - if (eq_tun_id_and_vni((u8 *)&geneve->info.key.tun_id, vni) && - ipv6_addr_equal(&addr6, &geneve->info.key.u.ipv6.dst)) - return geneve; + hlist_for_each_entry_rcu(node, vni_list_head, hlist) { + if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) && + ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst)) + return node->geneve; } return NULL; } @@ -579,6 +587,7 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) { struct net *net = geneve->net; struct geneve_net *gn = net_generic(net, geneve_net_id); + struct geneve_dev_node *node; struct geneve_sock *gs; __u8 vni[3]; __u32 hash; @@ -597,15 +606,20 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) out: gs->collect_md = geneve->collect_md; #if IS_ENABLED(CONFIG_IPV6) - if (ipv6) + if (ipv6) { rcu_assign_pointer(geneve->sock6, gs); - else + node = &geneve->hlist6; + } else #endif + { rcu_assign_pointer(geneve->sock4, gs); + node = &geneve->hlist4; + } + node->geneve = geneve; tunnel_id_to_vni(geneve->info.key.tun_id, vni); hash = geneve_net_vni_hash(vni); - hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]); + hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]); return 0; } @@ -632,8 +646,10 @@ static int geneve_stop(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - if (!hlist_unhashed(&geneve->hlist)) - hlist_del_rcu(&geneve->hlist); + hlist_del_init_rcu(&geneve->hlist4.hlist); +#if IS_ENABLED(CONFIG_IPV6) + hlist_del_init_rcu(&geneve->hlist6.hlist); +#endif geneve_sock_release(geneve); return 0; } diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 6f6ed75b63c9..765de3bedb88 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val) static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) { struct usb_device *dev = mcs->usbdev; - int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, reg, 
val, 2, - msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + void *dmabuf; + int ret; + + dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, + MCS_RD_RTYPE, 0, reg, dmabuf, 2, + msecs_to_jiffies(MCS_CTRL_TIMEOUT)); + + memcpy(val, dmabuf, sizeof(__u16)); + kfree(dmabuf); return ret; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index eebb0e1c70ff..b30d9ceee8bc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev) if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); + + /* Now we can run the state machine synchronously */ + phy_state_machine(&phydev->state_queue.work); } /** diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index f9c0e62716ea..18fb00d55aa1 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -120,6 +120,7 @@ struct ppp { int n_channels; /* how many channels are attached 54 */ spinlock_t rlock; /* lock for receive side 58 */ spinlock_t wlock; /* lock for transmit side 5c */ + int *xmit_recursion __percpu; /* xmit recursion detect */ int mru; /* max receive unit 60 */ unsigned int flags; /* control bits 64 */ unsigned int xstate; /* transmit state bits 68 */ @@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, struct ppp *ppp = netdev_priv(dev); int indx; int err; + int cpu; ppp->dev = dev; ppp->ppp_net = src_net; @@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, INIT_LIST_HEAD(&ppp->channels); spin_lock_init(&ppp->rlock); spin_lock_init(&ppp->wlock); + + ppp->xmit_recursion = alloc_percpu(int); + if (!ppp->xmit_recursion) { + err = -ENOMEM; + goto err1; + } + for_each_possible_cpu(cpu) + (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0; + #ifdef CONFIG_PPP_MULTILINK ppp->minseq = -1; skb_queue_head_init(&ppp->mrq); @@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); if (err < 0) - return err; + goto err2; conf->file->private_data = &ppp->file; return 0; +err2: + free_percpu(ppp->xmit_recursion); +err1: + return err; } static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { @@ -1398,18 +1413,16 @@ static void __ppp_xmit_process(struct ppp *ppp) ppp_xmit_unlock(ppp); } -static DEFINE_PER_CPU(int, ppp_xmit_recursion); - static void ppp_xmit_process(struct ppp *ppp) { local_bh_disable(); - if (unlikely(__this_cpu_read(ppp_xmit_recursion))) + if (unlikely(*this_cpu_ptr(ppp->xmit_recursion))) goto err; - __this_cpu_inc(ppp_xmit_recursion); + (*this_cpu_ptr(ppp->xmit_recursion))++; __ppp_xmit_process(ppp); - __this_cpu_dec(ppp_xmit_recursion); + (*this_cpu_ptr(ppp->xmit_recursion))--; local_bh_enable(); @@ -1900,23 +1913,23 @@ static void __ppp_channel_push(struct channel *pch) spin_unlock_bh(&pch->downl); /* see if there is anything from the attached unit to be sent */ if (skb_queue_empty(&pch->file.xq)) { - read_lock_bh(&pch->upl); ppp = pch->ppp; if (ppp) __ppp_xmit_process(ppp); - read_unlock_bh(&pch->upl); } } static void ppp_channel_push(struct channel *pch) { - local_bh_disable(); - - __this_cpu_inc(ppp_xmit_recursion); - __ppp_channel_push(pch); - __this_cpu_dec(ppp_xmit_recursion); - - local_bh_enable(); + read_lock_bh(&pch->upl); + if (pch->ppp) { + 
(*this_cpu_ptr(pch->ppp->xmit_recursion))++; + __ppp_channel_push(pch); + (*this_cpu_ptr(pch->ppp->xmit_recursion))--; + } else { + __ppp_channel_push(pch); + } + read_unlock_bh(&pch->upl); } /* @@ -3055,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp) #endif /* CONFIG_PPP_FILTER */ kfree_skb(ppp->xmit_pending); + free_percpu(ppp->xmit_recursion); free_netdev(ppp->dev); } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 4d4173d25dd0..d88ae3c2edbf 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -106,7 +106,7 @@ struct major_info { struct rcu_head rcu; dev_t major; struct idr minor_idr; - struct mutex minor_lock; + spinlock_t minor_lock; const char *device_name; struct list_head next; }; @@ -416,15 +416,15 @@ int tap_get_minor(dev_t major, struct tap_dev *tap) goto unlock; } - mutex_lock(&tap_major->minor_lock); - retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); + spin_lock(&tap_major->minor_lock); + retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC); if (retval >= 0) { tap->minor = retval; } else if (retval == -ENOSPC) { netdev_err(tap->dev, "Too many tap devices\n"); retval = -EINVAL; } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -442,12 +442,12 @@ void tap_free_minor(dev_t major, struct tap_dev *tap) goto unlock; } - mutex_lock(&tap_major->minor_lock); + spin_lock(&tap_major->minor_lock); if (tap->minor) { idr_remove(&tap_major->minor_idr, tap->minor); tap->minor = 0; } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -467,13 +467,13 @@ static struct tap_dev *dev_get_by_tap_file(int major, int minor) goto unlock; } - mutex_lock(&tap_major->minor_lock); + spin_lock(&tap_major->minor_lock); tap = idr_find(&tap_major->minor_idr, minor); if (tap) { dev = tap->dev; dev_hold(dev); } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -1227,7 +1227,7 @@ static int tap_list_add(dev_t major, const char *device_name) tap_major->major = MAJOR(major); idr_init(&tap_major->minor_idr); - mutex_init(&tap_major->minor_lock); + spin_lock_init(&tap_major->minor_lock); tap_major->device_name = device_name; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9ee7d4275640..5bd954d12541 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1876,6 +1876,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) err_detach: tun_detach_all(dev); + /* register_netdevice() already called tun_free_netdev() */ + goto err_free_dev; + err_free_flow: tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 32a22f4e8356..473f91322368 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ @@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, static void 
qmi_wwan_disconnect(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct qmi_wwan_state *info = (void *)&dev->data; + struct qmi_wwan_state *info; struct list_head *iter; struct net_device *ldev; + /* called twice if separate control and data intf */ + if (!dev) + return; + info = (void *)&dev->data; if (info->flags & QMI_WWAN_FLAG_MUX) { if (!rtnl_trylock()) { restart_syscall(); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 143d8a95a60d..acb754eb1ccb 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ - ctx = (void *)(unsigned long)len; get_page(alloc_frag->page); alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; if (hole < len + headroom) { /* To avoid internal fragmentation, if there is very likely not * enough space for another buffer, add the remaining space to - * the current buffer. This extra space is not included in - * the truesize stored in ctx. + * the current buffer. */ len += hole; alloc_frag->offset += hole; } sg_init_one(rq->sg, buf, len); + ctx = (void *)(unsigned long)len; err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -2221,6 +2220,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) kfree(names); kfree(callbacks); kfree(vqs); + kfree(ctx); return 0; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 022c0b5f9844..c7ee8e3cd38d 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -926,15 +926,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) static void vrf_dev_uninit(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct net_device *port_dev; - struct list_head *iter; vrf_rtable_release(dev, vrf); vrf_rt6_release(dev, vrf); - netdev_for_each_lower_dev(dev, port_dev, iter) - vrf_del_slave(dev, port_dev); - free_percpu(dev->dstats); dev->dstats = NULL; } @@ -1389,6 +1384,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) static void vrf_dellink(struct net_device *dev, struct list_head *head) { + struct net_device *port_dev; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, port_dev, iter) + vrf_del_slave(dev, port_dev); + unregister_netdevice_queue(dev, head); } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5fa798a5c9a6..c4e540126258 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -228,15 +228,15 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) { - struct vxlan_dev *vxlan; + struct vxlan_dev_node *node; /* For flow based devices, map all packets to VNI 0 */ if (vs->flags & VXLAN_F_COLLECT_METADATA) vni = 0; - hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { - if (vxlan->default_dst.remote_vni == vni) - return vxlan; + hlist_for_each_entry_rcu(node, vni_head(vs, vni), hlist) { + if (node->vxlan->default_dst.remote_vni == vni) + return node->vxlan; } return NULL; @@ -2365,17 +2365,22 @@ static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); spin_lock(&vn->sock_lock); - hlist_del_init_rcu(&vxlan->hlist); + hlist_del_init_rcu(&vxlan->hlist4.hlist); +#if IS_ENABLED(CONFIG_IPV6) + 
hlist_del_init_rcu(&vxlan->hlist6.hlist); +#endif spin_unlock(&vn->sock_lock); } -static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) +static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan, + struct vxlan_dev_node *node) { struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); __be32 vni = vxlan->default_dst.remote_vni; + node->vxlan = vxlan; spin_lock(&vn->sock_lock); - hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); + hlist_add_head_rcu(&node->hlist, vni_head(vs, vni)); spin_unlock(&vn->sock_lock); } @@ -2819,6 +2824,7 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) { struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_sock *vs = NULL; + struct vxlan_dev_node *node; if (!vxlan->cfg.no_share) { spin_lock(&vn->sock_lock); @@ -2836,12 +2842,16 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) if (IS_ERR(vs)) return PTR_ERR(vs); #if IS_ENABLED(CONFIG_IPV6) - if (ipv6) + if (ipv6) { rcu_assign_pointer(vxlan->vn6_sock, vs); - else + node = &vxlan->hlist6; + } else #endif + { rcu_assign_pointer(vxlan->vn4_sock, vs); - vxlan_vs_add_dev(vs, vxlan); + node = &vxlan->hlist4; + } + vxlan_vs_add_dev(vs, vxlan, node); return 0; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index ae3043559b6d..fe5102ca5010 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1821,8 +1821,6 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah) static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum) { REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR); - REG_SET_BIT(ah, 0x9864, 0x7f000); - REG_SET_BIT(ah, 0x9924, 0x7f00fe); REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); REG_WRITE(ah, AR_CR, AR_CR_RXD); REG_WRITE(ah, AR_DLCL_IFS(qnum), 0); diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c index 568b1c6c2b2b..73f46fb3e83a 100644 --- a/drivers/net/wireless/ath/ath9k/rng.c +++ b/drivers/net/wireless/ath/ath9k/rng.c @@ -120,6 +120,8 @@ void ath9k_rng_start(struct ath_softc *sc) void ath9k_rng_stop(struct ath_softc *sc) { - if (sc->rng_task) + if (sc->rng_task) { kthread_stop(sc->rng_task); + sc->rng_task = NULL; + } } diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index 16aca9e28b77..1fa7f844b5da 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -189,22 +189,27 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf, if (strtobool(buf, &start)) return -EINVAL; + mutex_lock(&sc->mutex); + if (start == sc->tx99_state) { if (!start) - return count; + goto out; ath_dbg(common, XMIT, "Resetting TX99\n"); ath9k_tx99_deinit(sc); } if (!start) { ath9k_tx99_deinit(sc); - return count; + goto out; } r = ath9k_tx99_init(sc); - if (r) + if (r) { + mutex_unlock(&sc->mutex); return r; - + } +out: + mutex_unlock(&sc->mutex); return count; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 9b970dc2b922..844c1e68ec03 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -706,7 +706,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt) int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, struct sk_buff_head *pktq, uint totlen) { - struct sk_buff 
*glom_skb; + struct sk_buff *glom_skb = NULL; struct sk_buff *skb; u32 addr = sdiodev->sbwad; int err = 0; @@ -727,10 +727,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, return -ENOMEM; err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, glom_skb); - if (err) { - brcmu_pkt_buf_free_skb(glom_skb); + if (err) goto done; - } skb_queue_walk(pktq, skb) { memcpy(skb->data, glom_skb->data, skb->len); @@ -741,6 +739,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, pktq); done: + brcmu_pkt_buf_free_skb(glom_skb); return err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 617199c0e5a0..210f2dc58b74 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4851,6 +4851,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_KERNEL); } else if (ieee80211_is_action(mgmt->frame_control)) { + if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { + brcmf_err("invalid action frame length\n"); + err = -EINVAL; + goto exit; + } af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); if (af_params == NULL) { brcmf_err("unable to allocate frame\n"); @@ -6850,7 +6855,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info)); if (!wiphy) { brcmf_err("Could not allocate wiphy device\n"); - return NULL; + goto ops_out; } memcpy(wiphy->perm_addr, drvr->mac, ETH_ALEN); set_wiphy_dev(wiphy, busdev); @@ -6993,6 +6998,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, ifp->vif = NULL; wiphy_out: brcmf_free_wiphy(wiphy); +ops_out: kfree(ops); return NULL; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 5653d6dd38f6..d44f59ef4f72 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4168,11 +4168,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) goto fail; } - /* allocate scatter-gather table. sg support - * will be disabled upon allocation failure. 
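The brcmf_sdiod_recv_chain() hunk above is a textbook centralized-cleanup conversion: glom_skb starts as NULL, every failure path falls through to the single done: label, and the NULL-safe brcmu_pkt_buf_free_skb() then runs exactly once on every path. A standalone sketch of the same pattern, with a hypothetical buf type in place of sk_buff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; };   /* stand-in for sk_buff */

static struct buf *buf_alloc(size_t len)
{
    struct buf *b = calloc(1, sizeof(*b));

    if (!b)
        return NULL;
    b->data = malloc(len);
    if (!b->data) {
        free(b);
        return NULL;
    }
    b->len = len;
    return b;
}

/* NULL-safe, like brcmu_pkt_buf_free_skb(). */
static void buf_free(struct buf *b)
{
    if (!b)
        return;
    free(b->data);
    free(b);
}

static int recv_chain_example(size_t totlen)
{
    struct buf *glom = NULL;   /* so the single exit is always safe */
    int err = 0;

    glom = buf_alloc(totlen ? totlen : 1);
    if (!glom)
        return -1;

    /* Simulated bus transfer: fail on a zero-length request. */
    err = totlen ? 0 : -1;
    if (err)
        goto done;

    memset(glom->data, 0, glom->len);   /* pretend to copy out */

done:
    buf_free(glom);   /* exactly one free, on every path */
    return err;
}

int main(void)
{
    printf("ok=%d err=%d\n", recv_chain_example(64), recv_chain_example(0));
    return 0;
}

Initializing the pointer to NULL is what lets the pre-patch double-free/leak pair collapse into one unconditional free at the exit label.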
- */ - brcmf_sdiod_sgtable_alloc(bus->sdiodev); - /* Query the F2 block size, set roundup accordingly */ bus->blocksize = bus->sdiodev->func[2]->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index 4b97371c3b42..838946d17b59 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb) next_reclaimed; IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", next_reclaimed); + iwlagn_check_ratid_empty(priv, sta_id, tid); } iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); - iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; /* process frames */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a67aa1f5a51c..db5ad222e5ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1974,14 +1974,32 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), "Failed to update SF upon disassociation\n"); - /* remove AP station now that the MAC is unassoc */ - ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); - if (ret) - IWL_ERR(mvm, "failed to remove AP station\n"); + /* + * If we get an assert during the connection (after the + * station has been added, but before the vif is set + * to associated), mac80211 will re-add the station and + * then configure the vif. Since the vif is not + * associated, we would remove the station here and + * this would fail the recovery. + */ + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, + &mvm->status)) { + /* + * Remove AP station now that + * the MAC is unassoc + */ + ret = iwl_mvm_rm_sta_id(mvm, vif, + mvmvif->ap_sta_id); + if (ret) + IWL_ERR(mvm, + "failed to remove AP station\n"); + + if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) + mvm->d0i3_ap_sta_id = + IWL_MVM_INVALID_STA; + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + } - if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; /* remove quota for this interface */ ret = iwl_mvm_update_quotas(mvm, false, NULL); if (ret) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 7ec06bf13413..025bc06a19d6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2964,10 +2964,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, if (!dev) { mwifiex_dbg(adapter, ERROR, "no memory available for netdevice\n"); - memset(&priv->wdev, 0, sizeof(priv->wdev)); - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto err_alloc_netdev; } mwifiex_init_priv_params(priv, dev); @@ -2976,11 +2974,11 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL, true); if (ret) - return ERR_PTR(ret); + goto err_set_bss_mode; ret = mwifiex_sta_init_cmd(priv, false, false); if (ret) - return ERR_PTR(ret); + goto err_sta_init; mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv); if (adapter->is_hw_11ac_capable) @@ -3011,31 +3009,14 @@ struct wireless_dev 
*mwifiex_add_virtual_intf(struct wiphy *wiphy, SET_NETDEV_DEV(dev, adapter->dev); - /* Register network device */ - if (register_netdevice(dev)) { - mwifiex_dbg(adapter, ERROR, - "cannot register virtual network device\n"); - free_netdev(dev); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - priv->netdev = NULL; - memset(&priv->wdev, 0, sizeof(priv->wdev)); - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-EFAULT); - } - priv->dfs_cac_workqueue = alloc_workqueue("MWIFIEX_DFS_CAC%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1, name); if (!priv->dfs_cac_workqueue) { - mwifiex_dbg(adapter, ERROR, - "cannot register virtual network device\n"); - free_netdev(dev); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - priv->netdev = NULL; - memset(&priv->wdev, 0, sizeof(priv->wdev)); - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - return ERR_PTR(-ENOMEM); + mwifiex_dbg(adapter, ERROR, "cannot alloc DFS CAC queue\n"); + ret = -ENOMEM; + goto err_alloc_cac; } INIT_DELAYED_WORK(&priv->dfs_cac_work, mwifiex_dfs_cac_work_queue); @@ -3044,16 +3025,9 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, name); if (!priv->dfs_chan_sw_workqueue) { - mwifiex_dbg(adapter, ERROR, - "cannot register virtual network device\n"); - free_netdev(dev); - priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; - priv->netdev = NULL; - memset(&priv->wdev, 0, sizeof(priv->wdev)); - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - destroy_workqueue(priv->dfs_cac_workqueue); - priv->dfs_cac_workqueue = NULL; - return ERR_PTR(-ENOMEM); + mwifiex_dbg(adapter, ERROR, "cannot alloc DFS channel sw queue\n"); + ret = -ENOMEM; + goto err_alloc_chsw; } INIT_DELAYED_WORK(&priv->dfs_chan_sw_work, @@ -3061,6 +3035,13 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, sema_init(&priv->async_sem, 1); + /* Register network device */ + if (register_netdevice(dev)) { + mwifiex_dbg(adapter, ERROR, "cannot register network device\n"); + ret = -EFAULT; + goto err_reg_netdev; + } + mwifiex_dbg(adapter, INFO, "info: %s: Marvell 802.11 Adapter\n", dev->name); @@ -3081,11 +3062,29 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, adapter->curr_iface_comb.p2p_intf++; break; default: + /* This should be dead code; checked above */ mwifiex_dbg(adapter, ERROR, "type not supported\n"); return ERR_PTR(-EINVAL); } return &priv->wdev; + +err_reg_netdev: + destroy_workqueue(priv->dfs_chan_sw_workqueue); + priv->dfs_chan_sw_workqueue = NULL; +err_alloc_chsw: + destroy_workqueue(priv->dfs_cac_workqueue); + priv->dfs_cac_workqueue = NULL; +err_alloc_cac: + free_netdev(dev); + priv->netdev = NULL; +err_sta_init: +err_set_bss_mode: +err_alloc_netdev: + memset(&priv->wdev, 0, sizeof(priv->wdev)); + priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; + return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index f949ad2bd898..fa3547e06424 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -70,10 +70,10 @@ #define WSPI_MAX_CHUNK_SIZE 4092 /* - * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to - * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx + * wl18xx driver aggregation buffer size is (13 * 4K) compared to + * (4 * 4K) for wl12xx, so use the larger buffer needed for wl18xx */ -#define SPI_AGGR_BUFFER_SIZE (13 * 
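/*
 * Editor's sketch (not part of the patch): the mwifiex rework above
 * replaces six near-identical copies of the cleanup sequence with one
 * goto ladder that unwinds strictly in reverse order of acquisition.
 * Skeleton of that pattern with hypothetical resources:
 */
#include <errno.h>
#include <stdlib.h>

struct ctx { void *a, *b, *c; };

static int ctx_setup(struct ctx *ctx)
{
	int ret;

	ctx->a = malloc(16);
	if (!ctx->a)
		return -ENOMEM;	/* nothing acquired yet: plain return */

	ctx->b = malloc(16);
	if (!ctx->b) {
		ret = -ENOMEM;
		goto err_free_a;
	}

	ctx->c = malloc(16);
	if (!ctx->c) {
		ret = -ENOMEM;
		goto err_free_b;
	}
	return 0;

err_free_b:			/* unwind in reverse order of setup */
	free(ctx->b);
	ctx->b = NULL;
err_free_a:
	free(ctx->a);
	ctx->a = NULL;
	return ret;
}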
PAGE_SIZE) +#define SPI_AGGR_BUFFER_SIZE (13 * SZ_4K) /* Maximum number of SPI write chunks */ #define WSPI_MAX_NUM_OF_CHUNKS \ diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index c38bdd6a5a82..a47af81e0fc3 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -458,7 +458,7 @@ int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv) INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work); snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq", - dev_name(priv->dev)); + dev_name(&priv->ndev->nfc_dev->dev)); priv->fw_dnld.rx_wq = create_singlethread_workqueue(name); if (!priv->fw_dnld.rx_wq) return -ENOMEM; @@ -495,6 +495,7 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld; + int res; if (!priv->support_fw_dnld) return -ENOTSUPP; @@ -510,7 +511,9 @@ int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name) */ /* Retrieve FW binary */ - if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) { + res = request_firmware(&fw_dnld->fw, firmware_name, + &ndev->nfc_dev->dev); + if (res < 0) { nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name); return -ENOENT; } diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c index 51c8240a1672..a446590a71ca 100644 --- a/drivers/nfc/nfcmrvl/main.c +++ b/drivers/nfc/nfcmrvl/main.c @@ -124,12 +124,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, memcpy(&priv->config, pdata, sizeof(*pdata)); if (priv->config.reset_n_io) { - rc = devm_gpio_request_one(dev, - priv->config.reset_n_io, - GPIOF_OUT_INIT_LOW, - "nfcmrvl_reset_n"); - if (rc < 0) + rc = gpio_request_one(priv->config.reset_n_io, + GPIOF_OUT_INIT_LOW, + "nfcmrvl_reset_n"); + if (rc < 0) { + priv->config.reset_n_io = 0; nfc_err(dev, "failed to request reset_n io\n"); + } } if (phy == NFCMRVL_PHY_SPI) { @@ -154,7 +155,13 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, if (!priv->ndev) { nfc_err(dev, "nci_allocate_device failed\n"); rc = -ENOMEM; - goto error; + goto error_free_gpio; + } + + rc = nfcmrvl_fw_dnld_init(priv); + if (rc) { + nfc_err(dev, "failed to initialize FW download %d\n", rc); + goto error_free_dev; } nci_set_drvdata(priv->ndev, priv); @@ -162,24 +169,22 @@ struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, rc = nci_register_device(priv->ndev); if (rc) { nfc_err(dev, "nci_register_device failed %d\n", rc); - goto error_free_dev; + goto error_fw_dnld_deinit; } /* Ensure that controller is powered off */ nfcmrvl_chip_halt(priv); - rc = nfcmrvl_fw_dnld_init(priv); - if (rc) { - nfc_err(dev, "failed to initialize FW download %d\n", rc); - goto error_free_dev; - } - nfc_info(dev, "registered with nci successfully\n"); return priv; +error_fw_dnld_deinit: + nfcmrvl_fw_dnld_deinit(priv); error_free_dev: nci_free_device(priv->ndev); -error: +error_free_gpio: + if (priv->config.reset_n_io) + gpio_free(priv->config.reset_n_io); kfree(priv); return ERR_PTR(rc); } @@ -195,7 +200,7 @@ void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) nfcmrvl_fw_dnld_deinit(priv); if (priv->config.reset_n_io) - devm_gpio_free(priv->dev, priv->config.reset_n_io); + gpio_free(priv->config.reset_n_io); nci_unregister_device(ndev); nci_free_device(ndev); diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c index 83a99e38e7bd..6c0c301611c4 100644 --- a/drivers/nfc/nfcmrvl/uart.c +++ 
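/*
 * Editor's note (not part of the patch): devm_* resources are released
 * only when the owning device is unbound, which is the wrong lifetime
 * for this driver's register/unregister pairing, so the nfcmrvl change
 * above switches to plain gpio_request_one()/gpio_free() and releases
 * the GPIO explicitly on the error path and in
 * nfcmrvl_nci_unregister_dev(). Kernel-style sketch of the pairing
 * (legacy GPIO API, as used by the patch; example_start_device() is
 * hypothetical):
 */
#include <linux/gpio.h>

static int example_start_device(void);	/* hypothetical */

static int example_register(unsigned int reset_gpio)
{
	int rc;

	rc = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "reset_n");
	if (rc < 0)
		return rc;

	rc = example_start_device();
	if (rc) {
		gpio_free(reset_gpio);	/* explicit, mirrors unregister */
		return rc;
	}
	return 0;
}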
b/drivers/nfc/nfcmrvl/uart.c @@ -109,6 +109,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) struct nfcmrvl_private *priv; struct nfcmrvl_platform_data *pdata = NULL; struct nfcmrvl_platform_data config; + struct device *dev = nu->tty->dev; /* * Platform data cannot be used here since usually it is already used @@ -116,9 +117,8 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) * and check if DT entries were added. */ - if (nu->tty->dev->parent && nu->tty->dev->parent->of_node) - if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node, - &config) == 0) + if (dev && dev->parent && dev->parent->of_node) + if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0) pdata = &config; if (!pdata) { @@ -131,7 +131,7 @@ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) } priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops, - nu->tty->dev, pdata); + dev, pdata); if (IS_ERR(priv)) return PTR_ERR(priv); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 10e5bf460139..f27d1344d198 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -920,10 +920,8 @@ static void ntb_transport_link_work(struct work_struct *work) ntb_free_mw(nt, i); /* if there's an actual failure, we should just bail */ - if (rc < 0) { - ntb_link_disable(ndev); + if (rc < 0) return; - } out: if (ntb_link_is_up(ndev, NULL, NULL) == 1) diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 983718b8fd9b..3cd58143d468 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1248,10 +1248,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, struct page *page, bool is_write) { struct btt *btt = bdev->bd_disk->private_data; + int rc; - btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector); - page_endio(page, is_write, 0); - return 0; + rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector); + if (rc == 0) + page_endio(page, is_write, 0); + + return rc; } diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c index 7ceb5fa4f2a1..4baa4ce825ab 100644 --- a/drivers/nvdimm/claim.c +++ b/drivers/nvdimm/claim.c @@ -260,8 +260,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns, * work around this collision. 
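/*
 * Editor's sketch (not part of the patch): the btt_rw_page() change
 * above stops reporting unconditional success -- the page is completed
 * only when the underlying I/O actually succeeded, and the real return
 * code is propagated to the caller. Generic illustration of the fix;
 * do_io() and complete_page() are stand-ins:
 */
#include <stdbool.h>

struct page;
int do_io(struct page *page, bool is_write);		/* hypothetical */
void complete_page(struct page *page, bool is_write, int err);

static int rw_page_fixed(struct page *page, bool is_write)
{
	int rc = do_io(page, is_write);

	if (rc == 0)
		complete_page(page, is_write, 0); /* complete only on success */

	return rc;	/* was: rc ignored, 0 always returned */
}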
*/ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) - && !(flags & NVDIMM_IO_ATOMIC) - && !ndns->claim) { + && !(flags & NVDIMM_IO_ATOMIC)) { long cleared; cleared = nvdimm_clear_poison(&ndns->dev, diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 2dee908e4bae..932c3994ac52 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num) static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len) { const unsigned int sector_size = 512; - sector_t start_sector; + sector_t start_sector, end_sector; u64 num_sectors; u32 rem; start_sector = div_u64(ns_offset, sector_size); - num_sectors = div_u64_rem(len, sector_size, &rem); + end_sector = div_u64_rem(ns_offset + len, sector_size, &rem); if (rem) - num_sectors++; + end_sector++; + num_sectors = end_sector - start_sector; if (unlikely(num_sectors > (u64)INT_MAX)) { u64 remaining = num_sectors; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 24397d306d53..cab0cd8b7273 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -88,7 +88,7 @@ enum nvme_rdma_queue_flags { struct nvme_rdma_queue { struct nvme_rdma_qe *rsp_ring; - u8 sig_count; + atomic_t sig_count; int queue_size; size_t cmnd_capsule_len; struct nvme_rdma_ctrl *ctrl; @@ -553,6 +553,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, queue->cmnd_capsule_len = sizeof(struct nvme_command); queue->queue_size = queue_size; + atomic_set(&queue->sig_count, 0); queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, RDMA_PS_TCP, IB_QPT_RC); @@ -1040,17 +1041,16 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) nvme_rdma_wr_error(cq, wc, "SEND"); } -static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) +/* + * We want to signal completion at least every queue depth/2. This returns the + * largest power of two that is not above half of (queue size + 1) to optimize + * (avoid divisions). + */ +static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) { - int sig_limit; + int limit = 1 << ilog2((queue->queue_size + 1) / 2); - /* - * We signal completion every queue depth/2 and also handle the - * degenerated case of a device with queue_depth=1, where we - * would need to signal every message. 
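/*
 * Editor's sketch (not part of the patch): the nvme-rdma change above
 * replaces the unlocked "++sig_count % sig_limit" on a u8 with an
 * atomic counter and a power-of-two mask, so the test needs no
 * division and tolerates concurrent increments. Stand-alone
 * illustration of the same arithmetic; ilog2 is open-coded for
 * userspace and the function names are made up:
 */
#include <stdatomic.h>
#include <stdbool.h>

static int ilog2_u32(unsigned int v)	/* floor(log2(v)), v > 0 */
{
	return 31 - __builtin_clz(v);
}

static bool sig_limit_hit(atomic_int *sig_count, int queue_size)
{
	/* largest power of two not above (queue_size + 1) / 2 */
	int limit = 1 << ilog2_u32((queue_size + 1) / 2);

	/* fetch_add returns the old value; +1 matches atomic_inc_return */
	return ((atomic_fetch_add(sig_count, 1) + 1) & (limit - 1)) == 0;
}
/* queue_size == 1 gives limit == 1, mask == 0: every send is signaled. */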
- */ - sig_limit = max(queue->queue_size / 2, 1); - return (++queue->sig_count % sig_limit) == 0; + return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0; } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 8c830a80a648..6cf916d9db6d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -489,21 +489,24 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) rval = device_add(&nvmem->dev); if (rval) - goto out; + goto err_put_device; if (config->compat) { rval = nvmem_setup_compat(nvmem, config); if (rval) - goto out; + goto err_device_del; } if (config->cells) nvmem_add_cells(nvmem, config); return nvmem; -out: - ida_simple_remove(&nvmem_ida, nvmem->id); - kfree(nvmem); + +err_device_del: + device_del(&nvmem->dev); +err_put_device: + put_device(&nvmem->dev); + return ERR_PTR(rval); } EXPORT_SYMBOL_GPL(nvmem_register); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index e32ca2ef9e54..56c93f096de9 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; BUG_ON(size <= 0); @@ -814,6 +816,10 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long)iova, size); @@ -918,6 +924,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return 0; DBG_RUN_SG("%s() START %d entries\n", __func__, nents); @@ -990,6 +998,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN_SG("%s() START %d entries, %p,%x\n", __func__, nents, sg_virt(sglist), sglist->length); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 1133b5cc88ca..ed92c1254cff 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -154,7 +154,10 @@ struct dino_device }; /* Looks nice and keeps the compiler happy */ -#define DINO_DEV(d) ((struct dino_device *) d) +#define DINO_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct dino_device *)__pdata; }) /* @@ -953,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev) dino_dev->hba.dev = dev; dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); - dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ + dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND; spin_lock_init(&dino_dev->dinosaur_pen); dino_dev->hba.iommu = ccio_get_iommu(dev); diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 2ec2aef4d211..bc286cbbbc9b 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -111,8 +111,10 @@ static u32 lba_t32; /* Looks nice and keeps the compiler happy */ -#define LBA_DEV(d) ((struct lba_device *) (d)) - +#define LBA_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct lba_device *)__pdata; }) /* ** Only allow 8 subsidiary busses per LBA diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 33385e574433..87ad5fd6a7a2 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) return 0; ioc = GET_IOC(dev); + if (!ioc) + return 0; /* * check if mask is >= than the current 
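/*
 * Editor's sketch (not part of the patch): once device_add() has been
 * attempted, a struct device must be torn down with
 * device_del()/put_device() so its release callback runs; kfree()ing
 * the containing object directly (as the old "out:" label effectively
 * did) bypasses the kobject refcount. Kernel-style sketch of the
 * corrected unwind, mirroring the nvmem change above; struct my_obj
 * and my_setup() are hypothetical:
 */
#include <linux/device.h>
#include <linux/err.h>

struct my_obj {
	struct device dev;	/* device_initialize()d by the caller */
};

static int my_setup(struct my_obj *obj);	/* hypothetical */

static struct my_obj *my_register(struct my_obj *obj)
{
	int rval;

	rval = device_add(&obj->dev);
	if (rval)
		goto err_put_device;	/* live kobject: put, don't kfree */

	rval = my_setup(obj);
	if (rval)
		goto err_device_del;

	return obj;

err_device_del:
	device_del(&obj->dev);		/* undo device_add() */
err_put_device:
	put_device(&obj->dev);		/* drops final ref; release() frees */
	return ERR_PTR(rval);
}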
max IO Virt Address @@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int pide; ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; /* save offset bits */ offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; @@ -813,6 +817,10 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } offset = iova & ~IOVP_MASK; iova ^= offset; /* clear offset bits */ size += offset; @@ -952,6 +960,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, DBG_RUN_SG("%s() START %d entries\n", __func__, nents); ioc = GET_IOC(dev); + if (!ioc) + return 0; /* Fast path single entry scatterlists. */ if (nents == 1) { @@ -1037,6 +1047,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, __func__, nents, sg_virt(sglist), sglist->length); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } #ifdef SBA_COLLECT_STATS ioc->usg_calls++; diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 0e020b6e0943..4590eb95d5d8 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -139,6 +139,7 @@ PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ PCIE_CORE_INT_MMVC) +#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) #define PCIE_RC_CONFIG_SCC_SHIFT 16 @@ -295,7 +296,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { - void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where; + void __iomem *addr; + + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; @@ -319,11 +322,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 val) { u32 mask, tmp, offset; + void __iomem *addr; offset = where & ~0x3; + addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; if (size == 4) { - writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(val, addr); return PCIBIOS_SUCCESSFUL; } @@ -334,9 +339,9 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, * corrupt RW1C bits in adjacent registers. But the hardware * doesn't support smaller writes. 
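/*
 * Editor's sketch (not part of the patch): the rockchip controller only
 * accepts 32-bit config writes, so the code above emulates 1- and
 * 2-byte writes with a read-modify-write of the containing dword (with
 * the stated risk of clobbering RW1C bits in the untouched lanes).
 * Stand-alone version of the byte-lane arithmetic; assumes size is 1
 * or 2 and the access is naturally aligned:
 */
#include <stdint.h>

static uint32_t rmw32(uint32_t old, unsigned int where, unsigned int size,
		      uint32_t val)
{
	unsigned int shift = (where & 0x3) * 8;	/* byte lane within dword */
	uint32_t write_mask = ((1u << (size * 8)) - 1) << shift;

	/* keep the lanes we are NOT writing, merge in the new value */
	return (old & ~write_mask) | ((val << shift) & write_mask);
}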
*/ - tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; + tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); - writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); + writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index e27ad2a3bd33..642a182893ce 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c @@ -733,10 +733,10 @@ static void vmd_remove(struct pci_dev *dev) struct vmd_dev *vmd = pci_get_drvdata(dev); vmd_detach_resources(vmd); - vmd_cleanup_srcu(vmd); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); + vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); irq_domain_remove(vmd->irq_domain); } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index ba44fdfda66b..9e1569107cd6 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, for (;;) { if (affd) { - nvec = irq_calc_affinity_vectors(nvec, affd); + nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } @@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, if (flags & PCI_IRQ_AFFINITY) { if (!affd) affd = &msi_default_affd; - - if (affd->pre_vectors + affd->post_vectors > min_vecs) - return -EINVAL; - - /* - * If there aren't any vectors left after applying the pre/post - * vectors don't bother with assigning affinity. - */ - if (affd->pre_vectors + affd->post_vectors == min_vecs) - affd = NULL; } else { if (WARN_ON(affd)) affd = NULL; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 192e7b681b96..b399fa31e0a2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -964,6 +964,7 @@ static int pci_pm_thaw_noirq(struct device *dev) return pci_legacy_resume_early(dev); pci_update_current_state(pci_dev, PCI_D0); + pci_restore_state(pci_dev); if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 563901cd9c06..9e5483780c97 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4069,40 +4069,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } -static int __pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - might_sleep(); - - rc = pci_dev_specific_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - if (pcie_has_flr(dev)) { - if (!probe) - pcie_flr(dev); - rc = 0; - goto done; - } - - rc = pci_af_flr(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_pm_reset(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_dev_reset_slot_function(dev, probe); - if (rc != -ENOTTY) - goto done; - - rc = pci_parent_bus_reset(dev, probe); -done: - return rc; -} - static void pci_dev_lock(struct pci_dev *dev) { pci_cfg_access_lock(dev); @@ -4141,6 +4107,12 @@ static void pci_reset_notify(struct pci_dev *dev, bool prepare) { const struct pci_error_handlers *err_handler = dev->driver ? 
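/*
 * Editor's sketch (not part of the patch): passing minvec into
 * irq_calc_affinity_vectors() lets the MSI allocation loop above clamp
 * against the caller's minimum on every retry, instead of validating
 * the pre/post vector counts once up front. Generic shape of such a
 * descending-retry allocator; all names are hypothetical, and a
 * positive return from try_alloc() means "retry with this smaller
 * count", matching the loop in __pci_enable_msi_range():
 */
#include <errno.h>

int clamp_affinity(int minvec, int nvec);	/* hypothetical */
int try_alloc(int nvec);			/* hypothetical */

static int alloc_vectors_range(int minvec, int maxvec)
{
	int nvec = maxvec;

	for (;;) {
		nvec = clamp_affinity(minvec, nvec);
		if (nvec < minvec)
			return -ENOSPC;

		int rc = try_alloc(nvec);
		if (rc == 0)
			return nvec;	/* success: nvec vectors granted */
		if (rc < 0)
			return rc;	/* hard failure */
		if (rc < minvec)
			return -ENOSPC;
		nvec = rc;		/* fewer available: retry smaller */
	}
}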
dev->driver->err_handler : NULL; + + /* + * dev->driver->err_handler->reset_notify() is protected against + * races with ->remove() by the device lock, which must be held by + * the caller. + */ if (err_handler && err_handler->reset_notify) err_handler->reset_notify(dev, prepare); } @@ -4173,21 +4145,6 @@ static void pci_dev_restore(struct pci_dev *dev) pci_reset_notify(dev, false); } -static int pci_dev_reset(struct pci_dev *dev, int probe) -{ - int rc; - - if (!probe) - pci_dev_lock(dev); - - rc = __pci_dev_reset(dev, probe); - - if (!probe) - pci_dev_unlock(dev); - - return rc; -} - /** * __pci_reset_function - reset a PCI device function * @dev: PCI device to reset @@ -4207,7 +4164,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) */ int __pci_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 0); + int ret; + + pci_dev_lock(dev); + ret = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); + + return ret; } EXPORT_SYMBOL_GPL(__pci_reset_function); @@ -4232,7 +4195,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function); */ int __pci_reset_function_locked(struct pci_dev *dev) { - return __pci_dev_reset(dev, 0); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) { + pcie_flr(dev); + return 0; + } + rc = pci_af_flr(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 0); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 0); + if (rc != -ENOTTY) + return rc; + return pci_parent_bus_reset(dev, 0); } EXPORT_SYMBOL_GPL(__pci_reset_function_locked); @@ -4249,7 +4232,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked); */ int pci_probe_reset_function(struct pci_dev *dev) { - return pci_dev_reset(dev, 1); + int rc; + + might_sleep(); + + rc = pci_dev_specific_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + if (pcie_has_flr(dev)) + return 0; + rc = pci_af_flr(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_pm_reset(dev, 1); + if (rc != -ENOTTY) + return rc; + rc = pci_dev_reset_slot_function(dev, 1); + if (rc != -ENOTTY) + return rc; + + return pci_parent_bus_reset(dev, 1); } /** @@ -4272,20 +4274,57 @@ int pci_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; + pci_dev_lock(dev); pci_dev_save_and_disable(dev); - rc = pci_dev_reset(dev, 0); + rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function); +/** + * pci_reset_function_locked - quiesce and reset a PCI device function + * @dev: PCI device to reset + * + * Some devices allow an individual function to be reset without affecting + * other functions in the same device. The PCI device must be responsive + * to PCI config space in order to use this function. + * + * This function does not just reset the PCI portion of a device, but + * clears all the state associated with the device. This function differs + * from __pci_reset_function() in that it saves and restores device state + * over the reset. It also differs from pci_reset_function() in that it + * requires the PCI device lock to be held. + * + * Returns 0 if the device function was successfully reset or negative if the + * device doesn't support resetting a single function. 
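/*
 * Editor's sketch (not part of the patch): the removal of
 * __pci_dev_reset() above unrolls its goto chain into the locked and
 * probe variants, but the policy is unchanged: try each reset method
 * in priority order and fall through only on -ENOTTY ("method not
 * supported"). Table-driven userspace rendering of the same cascade;
 * the method names are stand-ins:
 */
#include <errno.h>
#include <stddef.h>

struct pci_dev;
typedef int (*reset_fn)(struct pci_dev *dev, int probe);

int dev_specific_reset(struct pci_dev *dev, int probe);
int flr_reset(struct pci_dev *dev, int probe);
int pm_reset(struct pci_dev *dev, int probe);
int slot_reset(struct pci_dev *dev, int probe);
int parent_bus_reset(struct pci_dev *dev, int probe);

static int reset_cascade(struct pci_dev *dev, int probe)
{
	static const reset_fn methods[] = {
		dev_specific_reset, flr_reset, pm_reset,
		slot_reset, parent_bus_reset,
	};
	int rc = -ENOTTY;

	for (size_t i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
		rc = methods[i](dev, probe);
		if (rc != -ENOTTY)
			break;	/* method exists: report its result */
	}
	return rc;
}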
+ */ +int pci_reset_function_locked(struct pci_dev *dev) +{ + int rc; + + rc = pci_probe_reset_function(dev); + if (rc) + return rc; + + pci_dev_save_and_disable(dev); + + rc = __pci_reset_function_locked(dev); + + pci_dev_restore(dev); + + return rc; +} +EXPORT_SYMBOL_GPL(pci_reset_function_locked); + /** * pci_try_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset @@ -4296,20 +4335,18 @@ int pci_try_reset_function(struct pci_dev *dev) { int rc; - rc = pci_dev_reset(dev, 1); + rc = pci_probe_reset_function(dev); if (rc) return rc; - pci_dev_save_and_disable(dev); + if (!pci_dev_trylock(dev)) + return -EAGAIN; - if (pci_dev_trylock(dev)) { - rc = __pci_dev_reset(dev, 0); - pci_dev_unlock(dev); - } else - rc = -EAGAIN; + pci_dev_save_and_disable(dev); + rc = __pci_reset_function_locked(dev); + pci_dev_unlock(dev); pci_dev_restore(dev); - return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); @@ -4459,7 +4496,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_save_and_disable(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_save_and_disable(dev->subordinate); } @@ -4474,7 +4513,9 @@ static void pci_bus_restore(struct pci_bus *bus) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { + pci_dev_lock(dev); pci_dev_restore(dev); + pci_dev_unlock(dev); if (dev->subordinate) pci_bus_restore(dev->subordinate); } diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 20f1b4493994..04e929fd0ffe 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1547,6 +1547,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), }, }, + { + .ident = "HP Chromebook 11 G5 (Setzer)", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), + }, + }, { .ident = "Acer Chromebook R11 (Cyan)", .matches = { diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 4d4ef42a39b5..86c4b3fab7b0 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; -static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; -static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; -static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; +static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 }; +static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 }; +static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 }; static const unsigned int mrfld_pwm0_pins[] = { 144 }; static const unsigned int mrfld_pwm1_pins[] = { 145 }; static const unsigned int mrfld_pwm2_pins[] = { 132 }; diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 9b00be15d258..df942272ba54 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c @@ -85,6 +85,7 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = { MESON_PIN(GPIODV_15, EE_OFF), MESON_PIN(GPIODV_16, EE_OFF), MESON_PIN(GPIODV_17, 
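/*
 * Editor's sketch (not part of the patch): pci_try_reset_function()
 * above now takes the device lock *before* saving and disabling the
 * device, so a failed trylock leaves the device completely untouched.
 * The same ordering with pthreads standing in for the device lock;
 * save_and_disable()/do_reset()/restore() are hypothetical:
 */
#include <errno.h>
#include <pthread.h>

struct dev_state { pthread_mutex_t lock; /* ... device state ... */ };

void save_and_disable(struct dev_state *d);	/* hypothetical */
int  do_reset(struct dev_state *d);		/* hypothetical */
void restore(struct dev_state *d);		/* hypothetical */

static int try_reset(struct dev_state *d)
{
	int rc;

	if (pthread_mutex_trylock(&d->lock))
		return -EAGAIN;		/* busy: state untouched */

	save_and_disable(d);		/* only once we own the lock */
	rc = do_reset(d);
	pthread_mutex_unlock(&d->lock);
	restore(d);			/* after unlock, as in the patch */
	return rc;
}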
EE_OFF), + MESON_PIN(GPIODV_18, EE_OFF), MESON_PIN(GPIODV_19, EE_OFF), MESON_PIN(GPIODV_20, EE_OFF), MESON_PIN(GPIODV_21, EE_OFF), diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c index 998210eacf37..3046fd732155 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c @@ -89,6 +89,7 @@ static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = { MESON_PIN(GPIODV_15, EE_OFF), MESON_PIN(GPIODV_16, EE_OFF), MESON_PIN(GPIODV_17, EE_OFF), + MESON_PIN(GPIODV_18, EE_OFF), MESON_PIN(GPIODV_19, EE_OFF), MESON_PIN(GPIODV_20, EE_OFF), MESON_PIN(GPIODV_21, EE_OFF), diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 5c96f5558310..6aaeb0e9360e 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -176,7 +176,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = { }; const struct armada_37xx_pin_data armada_37xx_pin_sb = { - .nr_pins = 29, + .nr_pins = 30, .name = "GPIO2", .groups = armada_37xx_sb_groups, .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 7b0e6cc35e04..2ea8b1505138 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -205,8 +205,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd) spin_unlock_irqrestore(&bank->slock, flags); - exynos_irq_unmask(irqd); - return 0; } @@ -226,8 +224,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd) shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC]; mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1; - exynos_irq_mask(irqd); - spin_lock_irqsave(&bank->slock, flags); con = readl(bank->eint_base + reg_con); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c index fb30b86a97ee..5fbbdbf349b8 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c @@ -811,6 +811,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = { SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */ SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */ SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ + SUNXI_FUNCTION(0x5, "sim"), /* DET */ SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c index 706effe0a492..ad73db8d067b 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c @@ -508,57 +508,71 @@ static const unsigned usb1_pins[] = {48, 49}; static const int usb1_muxvals[] = {0, 0}; static const unsigned usb2_pins[] = {50, 51}; static const int usb2_muxvals[] = {0, 0}; -static const unsigned port_range_pins[] = { +static const unsigned port_range0_pins[] = { 159, 160, 161, 162, 163, 164, 165, 166, /* PORT0x */ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */ - 16, 17, 18, -1, -1, -1, -1, -1, /* PORT3x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */ - -1, -1, -1, 46, 47, 48, 49, 50, /* PORT5x */ - 51, -1, -1, 54, 55, 56, 57, 58, /* PORT6x */ + 16, 17, 18, /* PORT30-32 */ +}; +static const int port_range0_muxvals[] = { + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ + 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ + 15, 15, 15, 15, 15, 15, 15, 15, 
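/*
 * Editor's note (not part of the patch): the armada-37xx fix above is a
 * classic inclusive-range off-by-one -- a bank exposing pins 0..29 has
 * 30 pins, not 29. Trivial stand-alone check of the arithmetic:
 */
#include <assert.h>

static unsigned int pins_in_range(unsigned int first, unsigned int last)
{
	return last - first + 1;	/* inclusive: both endpoints count */
}

int main(void)
{
	assert(pins_in_range(0, 29) == 30);	/* the corrected GPIO2 count */
	return 0;
}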
/* PORT2x */ + 15, 15, 15, /* PORT30-32 */ +}; +static const unsigned port_range1_pins[] = { + 46, 47, 48, 49, 50, /* PORT53-57 */ + 51, /* PORT60 */ +}; +static const int port_range1_muxvals[] = { + 15, 15, 15, 15, 15, /* PORT53-57 */ + 15, /* PORT60 */ +}; +static const unsigned port_range2_pins[] = { + 54, 55, 56, 57, 58, /* PORT63-67 */ 59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */ 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ - 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */ - 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ - 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ - 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ - 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ - 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ - 139, 140, 141, 142, -1, -1, -1, -1, /* PORT22x */ - 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ - 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */ }; -static const int port_range_muxvals[] = { - 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ - 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ - 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ - 15, 15, 15, -1, -1, -1, -1, -1, /* PORT3x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */ - -1, -1, -1, 15, 15, 15, 15, 15, /* PORT5x */ - 15, -1, -1, 15, 15, 15, 15, 15, /* PORT6x */ +static const int port_range2_muxvals[] = { + 15, 15, 15, 15, 15, /* PORT63-67 */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ +}; +static const unsigned port_range3_pins[] = { + 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */ + 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ + 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ +}; +static const int port_range3_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ +}; +static const unsigned port_range4_pins[] = { + 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ +}; +static const int port_range4_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ +}; +static const unsigned port_range5_pins[] = { + 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ + 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ + 139, 140, 141, 142, /* PORT220-223 */ +}; +static const int port_range5_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */ - 15, 15, 15, 15, -1, -1, -1, -1, /* PORT22x */ + 15, 15, 15, 15, /* PORT220-223 */ +}; +static const unsigned port_range6_pins[] = { + 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ + 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */ +}; +static const int port_range6_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */ }; @@ -607,147 +621,153 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = { 
UNIPHIER_PINCTRL_GROUP(usb0), UNIPHIER_PINCTRL_GROUP(usb1), UNIPHIER_PINCTRL_GROUP(usb2), - UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range4), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range5), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range6), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives), - UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0), - UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1), - UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2), - UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3), - UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4), - UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5), - UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6), - UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7), - UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8), - UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9), - UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10), - UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11), - UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12), - UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13), - UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14), - UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15), - UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16), - UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17), - UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18), - UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19), - UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20), - UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21), - UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22), - UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23), - UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24), - UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25), - UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26), - UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43), - UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44), - UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45), - UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46), - UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47), - UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48), - UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51), - UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52), - UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53), - UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54), - UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55), - UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56), - UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57), - UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58), - UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59), - UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60), - UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61), - UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62), - UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63), - UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64), - UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65), - UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66), - UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67), - UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68), - 
UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69), - UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70), - UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71), - UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72), - UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73), - UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74), - UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75), - UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76), - UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77), - UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78), - UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79), - UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80), - UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81), - UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82), - UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83), - UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84), - UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85), - UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86), - UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87), - UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96), - UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97), - UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98), - UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99), - UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100), - UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101), - UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102), - UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103), - UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104), - UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105), - UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106), - UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107), - UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108), - UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109), - UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110), - UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111), - UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112), - UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113), - UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114), - UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115), - UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116), - UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117), - UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118), - UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119), - UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144), - UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145), - UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146), - UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147), - UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148), - UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149), - UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150), - UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151), - UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160), - UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161), - UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162), - UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163), - UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164), - UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165), - UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166), - UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167), - UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168), - UNIPHIER_PINCTRL_GROUP_SINGLE(port211, 
port_range, 169), - UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170), - UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171), - UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172), - UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173), - UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174), - UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175), - UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176), - UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177), - UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178), - UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179), - UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184), - UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185), - UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186), - UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187), - UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188), - UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189), - UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190), - UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191), - UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192), - UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193), - UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194), - UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195), - UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196), - UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197), - UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198), - UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199), + UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range1, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range1, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range1, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range1, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range1, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range1, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range2, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port64, 
port_range2, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range2, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range2, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range2, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range2, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range2, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range2, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range2, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range2, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range2, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range2, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range2, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range2, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range2, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range2, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range2, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range2, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range2, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range2, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range2, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range2, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range2, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range2, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range2, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range2, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range2, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range2, 27), + UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range2, 28), + UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range2, 29), + UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range2, 30), + UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range2, 31), + UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range2, 32), + UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range2, 33), + UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range2, 34), + UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range2, 35), + UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range2, 36), + UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range3, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range3, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range3, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range3, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range3, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range3, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range3, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range3, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range3, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range3, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range3, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range3, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range3, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range3, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range3, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range3, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range3, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range3, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range3, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range3, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range3, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range3, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range3, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range3, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range4, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port181, 
port_range4, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range4, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range4, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range4, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range4, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range4, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range4, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range5, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range5, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range5, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range5, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range5, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range5, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range5, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range5, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range5, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range5, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range5, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range5, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range5, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range5, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range5, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range5, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range5, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range5, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range5, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range5, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range6, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range6, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range6, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range6, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range6, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range6, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range6, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range6, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range6, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range6, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range6, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range6, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range6, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range6, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range6, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range6, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c index c8d18a2d3a88..93006626028d 100644 --- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c +++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c @@ -597,7 +597,7 @@ static const unsigned usb2_pins[] = {50, 51}; static const int usb2_muxvals[] = {0, 0}; static const unsigned usb3_pins[] = {52, 53}; static const int usb3_muxvals[] = {0, 0}; -static const unsigned port_range_pins[] = { +static const unsigned port_range0_pins[] = { 168, 169, 170, 171, 172, 173, 174, 175, /* PORT0x */ 0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */ 8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */ @@ -609,23 +609,8 @@ static const unsigned port_range_pins[] = { 75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */ 83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */ 91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ - 99, 100, 
101, 102, 103, 104, 105, 106, /* PORT12x */ - 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ - 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ - 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ - 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ - 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ - 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */ - 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ - 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */ - 163, 164, 165, 166, 167, /* PORT25x */ }; -static const int port_range_muxvals[] = { +static const int port_range0_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */ @@ -637,21 +622,38 @@ static const int port_range_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */ +}; +static const unsigned port_range1_pins[] = { + 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */ + 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */ + 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */ +}; +static const int port_range1_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */ +}; +static const unsigned port_range2_pins[] = { + 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */ +}; +static const int port_range2_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ - -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */ +}; +static const unsigned port_range3_pins[] = { + 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */ + 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */ + 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */ + 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */ + 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */ + 163, 164, 165, 166, 167, /* PORT250-254 */ +}; +static const int port_range3_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */ - 15, 15, 15, 15, 15, /* PORT25x */ + 15, 15, 15, 15, 15, /* PORT250-254 */ }; static const unsigned xirq_pins[] = { 149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */ @@ -695,174 +697,177 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = { UNIPHIER_PINCTRL_GROUP(usb1), UNIPHIER_PINCTRL_GROUP(usb2), UNIPHIER_PINCTRL_GROUP(usb3), - UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2), + UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives), - UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0), - UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1), - UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2), - 
UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3), - UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4), - UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5), - UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6), - UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7), - UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8), - UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9), - UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10), - UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11), - UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12), - UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13), - UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14), - UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15), - UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16), - UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17), - UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18), - UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19), - UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20), - UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21), - UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22), - UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23), - UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24), - UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25), - UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26), - UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27), - UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28), - UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29), - UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30), - UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31), - UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32), - UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33), - UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34), - UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35), - UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36), - UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37), - UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38), - UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39), - UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40), - UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41), - UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42), - UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43), - UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44), - UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45), - UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46), - UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47), - UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48), - UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49), - UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50), - UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51), - UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52), - UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53), - UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54), - UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55), - UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56), - UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57), - UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58), - UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59), - UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60), - UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61), - UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62), - UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63), - UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64), - 
UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65), - UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66), - UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67), - UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68), - UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69), - UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70), - UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71), - UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72), - UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73), - UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74), - UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75), - UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76), - UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77), - UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78), - UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79), - UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80), - UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81), - UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82), - UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83), - UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84), - UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85), - UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86), - UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87), - UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96), - UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97), - UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98), - UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99), - UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100), - UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101), - UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102), - UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103), - UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104), - UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105), - UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106), - UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107), - UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108), - UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109), - UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110), - UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111), - UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112), - UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113), - UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114), - UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115), - UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116), - UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117), - UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118), - UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119), - UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144), - UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145), - UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146), - UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147), - UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148), - UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149), - UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150), - UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151), - UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160), - UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161), - UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162), - UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163), - UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164), - UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165), 
- UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166), - UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167), - UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168), - UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169), - UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170), - UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171), - UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172), - UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173), - UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174), - UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175), - UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176), - UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177), - UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178), - UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179), - UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180), - UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181), - UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182), - UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183), - UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184), - UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185), - UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186), - UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187), - UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188), - UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189), - UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190), - UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191), - UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192), - UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193), - UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194), - UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195), - UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196), - UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197), - UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198), - UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199), - UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200), - UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201), - UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202), - UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203), - UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204), + UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port25, 
port_range0, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27), + UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28), + UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29), + UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30), + UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31), + UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32), + UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33), + UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34), + UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35), + UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36), + UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37), + UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38), + UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39), + UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40), + UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41), + UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42), + UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43), + UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44), + UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45), + UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46), + UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47), + UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48), + UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49), + UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50), + UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51), + UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52), + UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53), + UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54), + UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55), + UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56), + UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57), + UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58), + UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59), + UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60), + UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61), + UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62), + UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63), + UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64), + UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65), + UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66), + UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67), + UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68), + UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69), + UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70), + UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71), + UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72), + UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73), + UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74), + UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75), + UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76), + UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77), + UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78), + UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79), + UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80), + UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81), + UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 
82), + UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83), + UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84), + UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85), + UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86), + UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87), + UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22), + UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range2, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range2, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range2, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range2, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range2, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range2, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range2, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range2, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range3, 0), + UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range3, 1), + UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range3, 2), + UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range3, 3), + UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range3, 4), + UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range3, 5), + UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range3, 6), + UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range3, 7), + UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range3, 8), + UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range3, 9), + UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range3, 10), + UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range3, 11), + UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range3, 12), + UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range3, 13), + UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range3, 14), + UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range3, 15), + UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range3, 16), + UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range3, 17), + UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range3, 18), + UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range3, 19), + UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range3, 20), + UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range3, 21), + UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range3, 22), + 
UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range3, 23), + UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range3, 24), + UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range3, 25), + UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range3, 26), + UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range3, 27), + UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range3, 28), + UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range3, 29), + UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range3, 30), + UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range3, 31), + UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range3, 32), + UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range3, 33), + UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range3, 34), + UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range3, 35), + UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range3, 36), + UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range3, 37), + UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range3, 38), + UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range3, 39), + UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range3, 40), + UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range3, 41), + UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range3, 42), + UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range3, 43), + UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range3, 44), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3c52867dfe28..d145e0d90227 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1241,6 +1241,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI depends on SCSI_FC_ATTRS + depends on NVME_TARGET_FC || NVME_TARGET_FC=n + depends on NVME_FC || NVME_FC=n select CRC_T10DIF ---help--- This lpfc driver supports the Emulex LightPulse diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 0391fc317003..f6130e8b1ca1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2946,7 +2946,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) } /* Move PUREX, ABTS RX & RIDA to ATIOQ */ - if (ql2xmvasynctoatio) { + if (ql2xmvasynctoatio && + (IS_QLA83XX(ha) || IS_QLA27XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_11; @@ -2958,7 +2959,9 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", __func__, ha->fw_options[1], ha->fw_options[2], ha->fw_options[3], vha->host->active_mode); - qla2x00_set_fw_options(vha, ha->fw_options); + + if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3]) + qla2x00_set_fw_options(vha, ha->fw_options); /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 99e16ac479e3..a95eb022fb89 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -45,23 +45,23 @@ static struct kmem_cache *scsi_sense_isadma_cache; static DEFINE_MUTEX(scsi_sense_cache_mutex); static inline struct kmem_cache * -scsi_select_sense_cache(struct Scsi_Host *shost) +scsi_select_sense_cache(bool unchecked_isa_dma) { - return shost->unchecked_isa_dma ? - scsi_sense_isadma_cache : scsi_sense_cache; + return unchecked_isa_dma ? 
scsi_sense_isadma_cache : scsi_sense_cache; } -static void scsi_free_sense_buffer(struct Scsi_Host *shost, - unsigned char *sense_buffer) +static void scsi_free_sense_buffer(bool unchecked_isa_dma, + unsigned char *sense_buffer) { - kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer); + kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma), + sense_buffer); } -static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost, +static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma, gfp_t gfp_mask, int numa_node) { - return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask, - numa_node); + return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma), + gfp_mask, numa_node); } int scsi_init_sense_cache(struct Scsi_Host *shost) @@ -69,7 +69,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) struct kmem_cache *cache; int ret = 0; - cache = scsi_select_sense_cache(shost); + cache = scsi_select_sense_cache(shost->unchecked_isa_dma); if (cache) return 0; @@ -1138,6 +1138,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { void *buf = cmd->sense_buffer; void *prot = cmd->prot_sdb; + unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; unsigned long flags; /* zero out the cmd, except for the embedded scsi_request */ @@ -1147,6 +1148,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) cmd->device = dev; cmd->sense_buffer = buf; cmd->prot_sdb = prot; + cmd->flags = unchecked_isa_dma; INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies; @@ -1847,6 +1849,7 @@ static int scsi_mq_prep_fn(struct request *req) struct scsi_device *sdev = req->q->queuedata; struct Scsi_Host *shost = sdev->host; unsigned char *sense_buf = cmd->sense_buffer; + unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA; struct scatterlist *sg; /* zero out the cmd, except for the embedded scsi_request */ @@ -1858,6 +1861,7 @@ static int scsi_mq_prep_fn(struct request *req) cmd->request = req; cmd->device = sdev; cmd->sense_buffer = sense_buf; + cmd->flags = unchecked_isa_dma; cmd->tag = req->tag; @@ -2004,10 +2008,13 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx, unsigned int numa_node) { struct Scsi_Host *shost = set->driver_data; + const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - cmd->sense_buffer = - scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node); + if (unchecked_isa_dma) + cmd->flags |= SCMD_UNCHECKED_ISA_DMA; + cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, + GFP_KERNEL, numa_node); if (!cmd->sense_buffer) return -ENOMEM; cmd->req.sense = cmd->sense_buffer; @@ -2017,10 +2024,10 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq, static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq, unsigned int hctx_idx) { - struct Scsi_Host *shost = set->driver_data; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, + cmd->sense_buffer); } static int scsi_map_queues(struct blk_mq_tag_set *set) @@ -2093,11 +2100,15 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue); static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) { struct Scsi_Host *shost = q->rq_alloc_data; + const bool unchecked_isa_dma = shost->unchecked_isa_dma; struct scsi_cmnd *cmd = 
blk_mq_rq_to_pdu(rq); memset(cmd, 0, sizeof(*cmd)); - cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE); + if (unchecked_isa_dma) + cmd->flags |= SCMD_UNCHECKED_ISA_DMA; + cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp, + NUMA_NO_NODE); if (!cmd->sense_buffer) goto fail; cmd->req.sense = cmd->sense_buffer; @@ -2111,19 +2122,19 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp) return 0; fail_free_sense: - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer); fail: return -ENOMEM; } static void scsi_exit_rq(struct request_queue *q, struct request *rq) { - struct Scsi_Host *shost = q->rq_alloc_data; struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); if (cmd->prot_sdb) kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); - scsi_free_sense_buffer(shost, cmd->sense_buffer); + scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA, + cmd->sense_buffer); } struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 6f7128f49c30..27a6d3c6cb7c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -384,11 +384,12 @@ static void scsi_target_reap_ref_release(struct kref *kref) = container_of(kref, struct scsi_target, reap_ref); /* - * if we get here and the target is still in the CREATED state that + * if we get here and the target is still in a CREATED state that * means it was allocated but never made visible (because a scan * turned up no LUNs), so don't call device_del() on it. */ - if (starget->state != STARGET_CREATED) { + if ((starget->state != STARGET_CREATED) && + (starget->state != STARGET_CREATED_REMOVE)) { transport_remove_device(&starget->dev); device_del(&starget->dev); } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 82dfe07b1d47..3a6f557ec128 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1370,11 +1370,15 @@ void scsi_remove_target(struct device *dev) spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(starget, &shost->__targets, siblings) { if (starget->state == STARGET_DEL || - starget->state == STARGET_REMOVE) + starget->state == STARGET_REMOVE || + starget->state == STARGET_CREATED_REMOVE) continue; if (starget->dev.parent == dev || &starget->dev == dev) { kref_get(&starget->reap_ref); - starget->state = STARGET_REMOVE; + if (starget->state == STARGET_CREATED) + starget->state = STARGET_CREATED_REMOVE; + else + starget->state = STARGET_REMOVE; spin_unlock_irqrestore(shost->host_lock, flags); __scsi_remove_target(starget); scsi_target_reap(starget); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 82c33a6edbea..aa6f1debeaa7 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -751,29 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, return count; } -static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) -{ - switch (hp->dxfer_direction) { - case SG_DXFER_NONE: - if (hp->dxferp || hp->dxfer_len > 0) - return false; - return true; - case SG_DXFER_TO_DEV: - case SG_DXFER_FROM_DEV: - case SG_DXFER_TO_FROM_DEV: - if (!hp->dxferp || hp->dxfer_len == 0) - return false; - return true; - case SG_DXFER_UNKNOWN: - if ((!hp->dxferp && hp->dxfer_len) || - (hp->dxferp && hp->dxfer_len == 0)) - return false; - return true; - default: - return false; - } -} - static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) @@ -794,7 +771,7 @@ 
sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); - if (!sg_is_valid_dxfer(hp)) + if (hp->dxfer_len >= SZ_256M) return -EINVAL; k = sg_start_req(srp, cmnd); diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index f8dbfeee6c63..b2d80d449244 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -796,6 +796,16 @@ static int virtscsi_map_queues(struct Scsi_Host *shost) return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2); } +/* + * The host guarantees to respond to each command, although I/O + * latencies might be higher than on bare metal. Reset the timer + * unconditionally to give the host a chance to perform EH. + */ +static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd) +{ + return BLK_EH_RESET_TIMER; +} + static struct scsi_host_template virtscsi_host_template_single = { .module = THIS_MODULE, .name = "Virtio SCSI HBA", @@ -806,6 +816,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .eh_timed_out = virtscsi_eh_timed_out, .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, @@ -826,6 +837,8 @@ static struct scsi_host_template virtscsi_host_template_multi = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .eh_timed_out = virtscsi_eh_timed_out, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 1eb83c9613d5..78c885d80c96 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -269,6 +269,7 @@ struct atmel_spi_caps { bool is_spi2; bool has_wdrbt; bool has_dma_support; + bool has_pdc_support; }; /* @@ -1426,7 +1427,28 @@ static void atmel_get_caps(struct atmel_spi *as) as->caps.is_spi2 = version > 0x121; as->caps.has_wdrbt = version >= 0x210; +#ifdef CONFIG_SOC_SAM_V4_V5 + /* + * Atmel SoCs based on ARM9 (SAM9x) cores should not use spi_map_buf() + * since the latter function tries to map buffers with dma_map_sg() + * even if they have not been allocated inside DMA-safe areas. + * On SoCs based on Cortex A5 (SAMA5Dx), it works anyway because the + * data cache on those ARM cores follows the PIPT model. + * The L2 cache controller of the SAMA5D2 uses the PIPT model as well. + * With PIPT caches, cache aliases cannot occur. + * However, on ARM9 cores, the data cache follows the VIVT model, hence + * the cache-alias issue can occur when buffers are allocated from + * DMA-unsafe areas, by vmalloc() for instance, where cache coherency is + * not taken into account or at least not handled completely (cache + * lines of aliases are not invalidated). + * This is not a theoretical issue: it was reproduced when trying to + * mount a UBI file-system on an at91sam9g35ek board. 
+ */ + as->caps.has_dma_support = false; +#else as->caps.has_dma_support = version >= 0x212; +#endif + as->caps.has_pdc_support = version < 0x212; } /*-------------------------------------------------------------------------*/ @@ -1567,7 +1589,7 @@ static int atmel_spi_probe(struct platform_device *pdev) } else if (ret == -EPROBE_DEFER) { return ret; } - } else { + } else if (as->caps.has_pdc_support) { as->use_pdc = true; } diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 2b9b0941d9eb..6d23226e5f69 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev) return 0; } +static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + int ret; + + ret = of_device_uevent_modalias(dev, env); + if (ret != -ENODEV) + return ret; + + return 0; +} + static struct bus_type spmi_bus_type = { .name = "spmi", .match = spmi_device_match, .probe = spmi_drv_probe, .remove = spmi_drv_remove, + .uevent = spmi_drv_uevent, }; /** diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h index b76db1b2e197..a291b1225515 100644 --- a/drivers/staging/android/uapi/ion.h +++ b/drivers/staging/android/uapi/ion.h @@ -130,24 +130,6 @@ struct ion_heap_query { #define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ struct ion_allocation_data) -/** - * DOC: ION_IOC_FREE - free memory - * - * Takes an ion_handle_data struct and frees the handle. - */ -#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) - -/** - * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation - * - * Takes an ion_fd_data struct with the handle field populated with a valid - * opaque handle. Returns the struct with the fd field set to a file - * descriptor open in the current address space. This file descriptor - * can then be passed to another process. The corresponding opaque handle can - * be retrieved via ION_IOC_IMPORT. 
- */ -#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) - /** * DOC: ION_IOC_HEAP_QUERY - information about available heaps * diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index f191c2a75732..11d809780ee0 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf, continue; } + set_current_state(TASK_RUNNING); wp = async->buf_write_ptr; n1 = min(n, async->prealloc_bufsz - wp); n2 = n - n1; @@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes, } continue; } + + set_current_state(TASK_RUNNING); rp = async->buf_read_ptr; n1 = min(n, async->prealloc_bufsz - rp); n2 = n - n1; @@ -2915,6 +2918,7 @@ static int __init comedi_init(void) dev = comedi_alloc_board_minor(NULL); if (IS_ERR(dev)) { comedi_cleanup_board_minors(); + class_destroy(comedi_class); cdev_del(&comedi_cdev); unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index b2e382888981..2f7bfc1c59e5 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev, /* following line: 2-1 per STC */ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG); - /* following line: N-1 per STC */ - ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG); + ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG); } else { /* TRIG_EXT */ /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c index a6a8393d6664..3e00df74b18c 100644 --- a/drivers/staging/iio/resolver/ad2s1210.c +++ b/drivers/staging/iio/resolver/ad2s1210.c @@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev, long m) { struct ad2s1210_state *st = iio_priv(indio_dev); - bool negative; + u16 negative; int ret = 0; u16 pos; s16 vel; diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 0db662d6abdd..4fb55e4d7483 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) ibmsg = tx->tx_msg; ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE, - &from); + rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob, + &from); + if (rc != payload_nob) { + kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); + return -EFAULT; + } + nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); @@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, break; } - copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, - IBLND_MSG_SIZE, to); + rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen, + to); + if (rc != rlen) { + rc = -EFAULT; + break; + } + + rc = 0; lnet_finalize(ni, lntmsg, 0); break; diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c 
b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 963235fd7292..56cd4e5e51b2 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -43,7 +43,9 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ + {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ {} /* Terminating entry */ }; diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 386d4adcd91d..fa557e89f340 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -1053,6 +1053,26 @@ static int sm750fb_frambuffer_alloc(struct sm750_dev *sm750_dev, int fbidx) return err; } +static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW; +#endif + remove_conflicting_framebuffers(ap, "sm750_fb1", primary); + kfree(ap); + return 0; +} + static int lynxfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev, int fbidx; int err; + err = lynxfb_kick_out_firmware_fb(pdev); + if (err) + return err; + /* enable device */ err = pcim_enable_device(pdev); if (err) diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 028f54b453d0..b7afef056659 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -513,6 +513,9 @@ static int vnt_start(struct ieee80211_hw *hw) goto free_all; } + if (vnt_key_init_table(priv)) + goto free_all; + priv->int_interval = 1; /* bInterval is set to 1 */ vnt_int_start_interrupt(priv); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 3fdca2cdd8da..db843e3f355a 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -418,6 +418,7 @@ int iscsit_reset_np_thread( return 0; } np->np_thread_state = ISCSI_NP_THREAD_RESET; + atomic_inc(&np->np_reset_count); if (np->np_thread) { spin_unlock_bh(&np->np_thread_lock); @@ -2173,6 +2174,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); cmd->data_direction = DMA_NONE; + kfree(cmd->text_in_ptr); cmd->text_in_ptr = NULL; return 0; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 535a8e06a401..0dd4c45f7575 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -781,6 +781,7 @@ DEF_TPG_ATTRIB(default_erl); DEF_TPG_ATTRIB(t10_pi); DEF_TPG_ATTRIB(fabric_prot_type); DEF_TPG_ATTRIB(tpg_enabled_sendtargets); +DEF_TPG_ATTRIB(login_keys_workaround); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_attr_authentication, @@ -796,6 +797,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_attr_t10_pi, &iscsi_tpg_attrib_attr_fabric_prot_type, 
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, + &iscsi_tpg_attrib_attr_login_keys_workaround, NULL, }; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 92b96b51d506..e491cf75e92d 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1237,9 +1237,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) flush_signals(current); spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; + spin_unlock_bh(&np->np_thread_lock); complete(&np->np_restart_comp); + return 1; } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { spin_unlock_bh(&np->np_thread_lock); goto exit; @@ -1272,7 +1274,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) goto exit; } else if (rc < 0) { spin_lock_bh(&np->np_thread_lock); - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { + if (atomic_dec_if_positive(&np->np_reset_count) >= 0) { + np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; spin_unlock_bh(&np->np_thread_lock); complete(&np->np_restart_comp); iscsit_put_transport(conn->conn_transport); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 6f88b31242b0..8b957b8072b9 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -886,7 +886,8 @@ static int iscsi_target_handle_csg_zero( SENDER_TARGET, login->rsp_buf, &login->rsp_length, - conn->param_list); + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); if (ret < 0) return -1; @@ -956,7 +957,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log SENDER_TARGET, login->rsp_buf, &login->rsp_length, - conn->param_list); + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); if (ret < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, ISCSI_LOGIN_STATUS_INIT_ERR); diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index fce627628200..caab1045742d 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -765,7 +765,8 @@ static int iscsi_check_for_auth_key(char *key) return 0; } -static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) +static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param, + bool keys_workaround) { if (IS_TYPE_BOOL_AND(param)) { if (!strcmp(param->value, NO)) @@ -773,19 +774,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) } else if (IS_TYPE_BOOL_OR(param)) { if (!strcmp(param->value, YES)) SET_PSTATE_REPLY_OPTIONAL(param); - /* - * Required for gPXE iSCSI boot client - */ - if (!strcmp(param->name, IMMEDIATEDATA)) - SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, IMMEDIATEDATA)) + SET_PSTATE_REPLY_OPTIONAL(param); + } } else if (IS_TYPE_NUMBER(param)) { if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) SET_PSTATE_REPLY_OPTIONAL(param); - /* - * Required for gPXE iSCSI boot client - */ - if (!strcmp(param->name, MAXCONNECTIONS)) - SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for Mellanox Flexboot PXE boot ROM + */ + if (!strcmp(param->name, FIRSTBURSTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + + 
/* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, MAXCONNECTIONS)) + SET_PSTATE_REPLY_OPTIONAL(param); + } } else if (IS_PHASE_DECLARATIVE(param)) SET_PSTATE_REPLY_OPTIONAL(param); } @@ -1422,7 +1435,8 @@ int iscsi_encode_text_output( u8 sender, char *textbuf, u32 *length, - struct iscsi_param_list *param_list) + struct iscsi_param_list *param_list, + bool keys_workaround) { char *output_buf = NULL; struct iscsi_extra_response *er; @@ -1458,7 +1472,8 @@ int iscsi_encode_text_output( *length += 1; output_buf = textbuf + *length; SET_PSTATE_PROPOSER(param); - iscsi_check_proposer_for_optional_reply(param); + iscsi_check_proposer_for_optional_reply(param, + keys_workaround); pr_debug("Sending key: %s=%s\n", param->name, param->value); } diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index 9962ccf0ccd7..c47b73f57528 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -46,7 +46,7 @@ extern int iscsi_extract_key_value(char *, char **, char **); extern int iscsi_update_param_value(struct iscsi_param *, char *); extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *); extern int iscsi_encode_text_output(u8, u8, char *, u32 *, - struct iscsi_param_list *); + struct iscsi_param_list *, bool); extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *, struct iscsi_param_list *); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 2e7e08dbda48..89813a162c34 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->t10_pi = TA_DEFAULT_T10_PI; a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS; + a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -899,3 +900,21 @@ int iscsit_ta_tpg_enabled_sendtargets( return 0; } + +int iscsit_ta_login_keys_workaround( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->login_keys_workaround = flag; + pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ", + tpg->tpgt, (a->login_keys_workaround) ? 
"ON" : "OFF"); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index ceba29851167..59fd3cabe89d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -48,5 +48,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); +extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 310d9e55c6eb..2d9ad10de3b3 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl) mutex_lock(&tpg->acl_node_mutex); if (acl->dynamic_node_acl) acl->dynamic_node_acl = 0; - list_del(&acl->acl_list); + list_del_init(&acl->acl_list); mutex_unlock(&tpg->acl_node_mutex); target_shutdown_sessions(acl); @@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) * in transport_deregister_session(). */ list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f1b3a46bdcaf..884780d2ec69 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref) } mutex_lock(&se_tpg->acl_node_mutex); - list_del(&nacl->acl_list); + list_del_init(&nacl->acl_list); mutex_unlock(&se_tpg->acl_node_mutex); core_tpg_wait_for_nacl_pr_ref(nacl); @@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess) spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); if (se_nacl->dynamic_stop) - list_del(&se_nacl->acl_list); + list_del_init(&se_nacl->acl_list); } mutex_unlock(&se_tpg->acl_node_mutex); @@ -730,6 +730,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) if (cmd->transport_state & CMD_T_ABORTED || cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); + /* + * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), + * release se_device->caw_sem obtained by sbc_compare_and_write() + * since target_complete_ok_work() or target_complete_failure_work() + * won't be called to invoke the normal CAW completion callbacks. 
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { + up(&dev->caw_sem); + } complete_all(&cmd->t_transport_stop_comp); return; } else if (!success) { diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index beb5f098f32d..05804227234d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -437,7 +437,7 @@ static int scatter_data_area(struct tcmu_dev *udev, to_offset = get_block_offset_user(udev, dbi, block_remaining); offset = DATA_BLOCK_SIZE - block_remaining; - to = (void *)(unsigned long)to + offset; + to += offset; if (*iov_cnt != 0 && to_offset == iov_tail(udev, *iov)) { @@ -510,7 +510,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, copy_bytes = min_t(size_t, sg_remaining, block_remaining); offset = DATA_BLOCK_SIZE - block_remaining; - from = (void *)(unsigned long)from + offset; + from += offset; tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from, copy_bytes); @@ -699,25 +699,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); entry = (void *) mb + CMDR_OFF + cmd_head; - tcmu_flush_dcache_range(entry, sizeof(*entry)); tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; + tcmu_flush_dcache_range(entry, sizeof(*entry)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ WARN_ON(cmd_head != 0); } entry = (void *) mb + CMDR_OFF + cmd_head; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + memset(entry, 0, command_size); tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); entry->hdr.cmd_id = tcmu_cmd->cmd_id; - entry->hdr.kflags = 0; - entry->hdr.uflags = 0; /* Handle allocating space from the data area */ tcmu_cmd_reset_dbi_cur(tcmu_cmd); @@ -736,11 +735,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } entry->req.iov_cnt = iov_cnt; - entry->req.iov_dif_cnt = 0; /* Handle BIDI commands */ + iov_cnt = 0; if (se_cmd->se_cmd_flags & SCF_BIDI) { - iov_cnt = 0; iov++; ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg, @@ -753,8 +751,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_err("tcmu: alloc and scatter bidi data failed\n"); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - entry->req.iov_bidi_cnt = iov_cnt; } + entry->req.iov_bidi_cnt = iov_cnt; /* * Recalculate the command's base size and size according diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 69d0f430b2d1..be29489dd247 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -153,8 +153,10 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) mutex_lock(&cooling_list_lock); list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { + unsigned long level = get_level(cpufreq_dev, freq); + mutex_unlock(&cooling_list_lock); - return get_level(cpufreq_dev, freq); + return level; } } mutex_unlock(&cooling_list_lock); diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c index e9a1fe342760..71d35f3c9215 100644 --- a/drivers/thermal/max77620_thermal.c +++ b/drivers/thermal/max77620_thermal.c @@ -104,8 +104,6 @@ static int max77620_thermal_probe(struct platform_device 
*pdev) return -EINVAL; } - pdev->dev.of_node = pdev->dev.parent->of_node; - mtherm->dev = &pdev->dev; mtherm->rmap = dev_get_regmap(pdev->dev.parent, NULL); if (!mtherm->rmap) { @@ -113,6 +111,14 @@ static int max77620_thermal_probe(struct platform_device *pdev) return -ENODEV; } + /* + * Drop any current reference to a device-tree node and get a + * reference to the parent's node which will be balanced on reprobe or + * on platform-device release. + */ + of_node_put(pdev->dev.of_node); + pdev->dev.of_node = of_node_get(pdev->dev.parent->of_node); + mtherm->tz_device = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mtherm, &max77620_thermal_ops); if (IS_ERR(mtherm->tz_device)) { diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index bbefddd92bfe..92606b1e55bd 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1340,29 +1340,13 @@ static int imx_startup(struct uart_port *port) imx_enable_ms(&sport->port); /* - * If the serial port is opened for reading start RX DMA immediately - * instead of waiting for RX FIFO interrupts. In our iMX53 the average - * delay for the first reception dropped from approximately 35000 - * microseconds to 1000 microseconds. + * Start RX DMA immediately instead of waiting for RX FIFO interrupts. + * In our iMX53 the average delay for the first reception dropped from + * approximately 35000 microseconds to 1000 microseconds. */ if (sport->dma_is_enabled) { - struct tty_struct *tty = sport->port.state->port.tty; - struct tty_file_private *file_priv; - int readcnt = 0; - - spin_lock(&tty->files_lock); - - if (!list_empty(&tty->tty_files)) - list_for_each_entry(file_priv, &tty->tty_files, list) - if (!(file_priv->file->f_flags & O_WRONLY)) - readcnt++; - - spin_unlock(&tty->files_lock); - - if (readcnt > 0) { - imx_disable_rx_int(sport); - start_rx_dma(sport); - } + imx_disable_rx_int(sport); + start_rx_dma(sport); } spin_unlock_irqrestore(&sport->port.lock, flags); diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 71707e8e6e3f..9f1bc67e66ae 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev, { struct uart_port *port = dev_get_drvdata(dev); struct sci_port *sci = to_sci_port(port); + int ret; long r; - if (kstrtol(buf, 0, &r) == -EINVAL) - return -EINVAL; + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; sci->rx_trigger = scif_set_rtrg(port, r); if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) @@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev, { struct uart_port *port = dev_get_drvdata(dev); struct sci_port *sci = to_sci_port(port); + int ret; long r; - if (kstrtol(buf, 0, &r) == -EINVAL) - return -EINVAL; + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; sci->rx_fifo_timeout = r; scif_set_rtrg(port, 1); if (r > 0) diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index f5335be344f6..6b0ca65027d0 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport, if (IS_ERR(ascport->pinctrl)) { ret = PTR_ERR(ascport->pinctrl); dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret); + return ret; } ascport->states[DEFAULT] = diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9c9945284bcf..bacc48b0b4b8 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2709,13 +2709,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) * related 
to the kernel should not use this. */ data = vt_get_shift_state(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_GETMOUSEREPORTING: console_lock(); /* May be overkill */ data = mouse_reporting(); console_unlock(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETVESABLANK: console_lock(); @@ -2724,7 +2724,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) break; case TIOCL_GETKMSGREDIRECT: data = vt_get_kmsg_redirect(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: if (!capable(CAP_SYS_ADMIN)) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 5357d83bbda2..5e056064259c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, + { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ .driver_info = CLEAR_HALT_CONDITIONS, diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 5dea98358c05..cc4121605c53 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1878,7 +1878,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, /* No more submits can occur */ spin_lock_irq(&hcd_urb_list_lock); rescan: - list_for_each_entry (urb, &ep->urb_list, urb_list) { + list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) { int is_in; if (urb->unlinked) @@ -2475,6 +2475,8 @@ void usb_hc_died (struct usb_hcd *hcd) } if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { hcd = hcd->shared_hcd; + clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags); + set_bit(HCD_FLAG_DEAD, &hcd->flags); if (hcd->rh_registered) { clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b8bb20d7acdb..0881a3e8131c 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -4730,7 +4730,8 @@ hub_power_remaining(struct usb_hub *hub) static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, u16 portchange) { - int status, i; + int status = -ENODEV; + int i; unsigned unit_load; struct usb_device *hdev = hub->hdev; struct usb_hcd *hcd = bus_to_hcd(hdev->bus); @@ -4934,9 +4935,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, done: hub_port_disable(hub, port1, 1); - if (hcd->driver->relinquish_port && !hub->hdev->parent) - hcd->driver->relinquish_port(hcd, port1); - + if (hcd->driver->relinquish_port && !hub->hdev->parent) { + if (status != -ENOTCONN && status != -ENODEV) + hcd->driver->relinquish_port(hcd, port1); + } } /* Handle physical or logical connection change events. 
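The hub_port_connect() hunk above works by seeding status with -ENODEV: every early bail-out that never saw a usable device leaves that value in place, so the done: path hands the root-hub port to a companion controller only when an actually attached device failed, presumably to avoid bouncing an empty or disconnected port between the primary controller and its companion. A minimal standalone sketch of that error-path pattern follows; the demo_* names are hypothetical stand-ins, not kernel APIs.

/*
 * Sketch only: demonstrates the error-status gating used in
 * hub_port_connect() above, under assumed demo_* helpers.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static void demo_relinquish(int port)
{
	printf("port %d: handing over to companion controller\n", port);
}

static int demo_port_connect(int port, bool present, bool enum_ok)
{
	int status = -ENODEV;	/* default: nothing usable connected */

	if (!present)
		goto done;	/* early exit keeps status == -ENODEV */

	if (enum_ok)
		return 0;	/* success: port stays with this HCD */

	status = -EIO;		/* a real device failed enumeration */
done:
	/* Hand the port over only when an attached device failed,
	 * never when there was no device to begin with. */
	if (status != -ENOTCONN && status != -ENODEV)
		demo_relinquish(port);
	return status;
}

int main(void)
{
	demo_port_connect(1, false, false);	/* no handover */
	demo_port_connect(2, true, false);	/* handover */
	return 0;
}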
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 96b21b0dac1e..574da2b4529c 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, + /* Avision AV600U */ { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -223,6 +226,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Blackmagic Design UltraStudio SDI */ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + /* Hauppauge HVR-950q */ + { USB_DEVICE(0x2040, 0x7200), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -245,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = { { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME }, /* Logitech Optical Mouse M90/M100 */ { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 2776cfe64c09..ef9cf4a21afe 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -127,6 +127,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle, */ #define USB_ACPI_LOCATION_VALID (1 << 31) +static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent, + int raw) +{ + struct acpi_device *adev; + + if (!parent) + return NULL; + + list_for_each_entry(adev, &parent->children, node) { + if (acpi_device_adr(adev) == raw) + return adev; + } + + return acpi_find_child_device(parent, raw, false); +} + static struct acpi_device *usb_acpi_find_companion(struct device *dev) { struct usb_device *udev; @@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev) int raw; raw = usb_hcd_find_raw_port_number(hcd, port1); - adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev), - raw, false); + + adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev), + raw); + if (!adev) return NULL; } else { @@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev) return NULL; acpi_bus_get_device(parent_handle, &adev); - adev = acpi_find_child_device(adev, port1, false); + + adev = usb_acpi_find_port(adev, port1); + if (!adev) return NULL; } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 28b053cacc90..62e1906bb2f3 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -416,6 +416,8 @@ static void usb_release_dev(struct device *dev) usb_destroy_configuration(udev); usb_release_bos_descriptor(udev); + if (udev->parent) + of_node_put(dev->of_node); usb_put_hcd(hcd); kfree(udev->product); kfree(udev->manufacturer); diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index dfbf464eb88c..505676fd3ba4 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -230,7 +230,7 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->syscfg_reg_off = res->start; - dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", 
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); dwc3_data->rstc_pwrdn = diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index aea9a5b948b4..58d20e3decb5 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1215,12 +1215,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) return -ESHUTDOWN; } - if (WARN(req->dep != dep, "request %p belongs to '%s'\n", - &req->request, req->dep->name)) { - dev_err(dwc->dev, "%s: request %p belongs to '%s'\n", - dep->name, &req->request, req->dep->name); + if (WARN(req->dep != dep, "request %pK belongs to '%s'\n", + &req->request, req->dep->name)) return -EINVAL; - } pm_runtime_get(dwc->dev); @@ -1396,7 +1393,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep, } goto out1; } - dev_err(dwc->dev, "request %p was not queued to %s\n", + dev_err(dwc->dev, "request %pK was not queued to %s\n", request, ep->name); ret = -EINVAL; goto out0; diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index cd4c88529721..9f3addfca744 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -758,21 +758,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep return usb3_req; } -static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, - struct renesas_usb3_request *usb3_req, int status) +static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, + int status) { struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); - unsigned long flags; dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, status); usb3_req->req.status = status; - spin_lock_irqsave(&usb3->lock, flags); usb3_ep->started = false; list_del_init(&usb3_req->queue); - spin_unlock_irqrestore(&usb3->lock, flags); + spin_unlock(&usb3->lock); usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); + spin_lock(&usb3->lock); +} + +static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, + struct renesas_usb3_request *usb3_req, int status) +{ + struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + __usb3_request_done(usb3_ep, usb3_req, status); + spin_unlock_irqrestore(&usb3->lock, flags); } static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a9a1e4c40480..c8f38649f749 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -77,6 +77,16 @@ #define USB_INTEL_USB3_PSSEN 0xD8 #define USB_INTEL_USB3PRM 0xDC +/* ASMEDIA quirk use */ +#define ASMT_DATA_WRITE0_REG 0xF8 +#define ASMT_DATA_WRITE1_REG 0xFC +#define ASMT_CONTROL_REG 0xE0 +#define ASMT_CONTROL_WRITE_BIT 0x02 +#define ASMT_WRITEREG_CMD 0x10423 +#define ASMT_FLOWCTL_ADDR 0xFA30 +#define ASMT_FLOWCTL_DATA 0xBA +#define ASMT_PSEUDO_DATA 0 + /* * amd_chipset_gen values represent AMD different chipset generations */ @@ -88,6 +98,7 @@ enum amd_chipset_gen { AMD_CHIPSET_HUDSON2, AMD_CHIPSET_BOLTON, AMD_CHIPSET_YANGTZE, + AMD_CHIPSET_TAISHAN, AMD_CHIPSET_UNKNOWN, }; @@ -131,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) pinfo->sb_type.gen = AMD_CHIPSET_SB700; else if (rev >= 0x40 && rev <= 0x4f) pinfo->sb_type.gen = AMD_CHIPSET_SB800; + } + pinfo->smbus_dev = 
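renesas_usb3 above splits completion into a locked wrapper (usb3_request_done) and an unlocked core (__usb3_request_done) because usb_gadget_giveback_request() invokes the function driver's completion callback, which may immediately queue or dequeue requests and try to take usb3->lock again. The core drops the lock only around the giveback. The pattern in miniature (types reduced to the essentials; a sketch, not the driver's code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct req {
	struct list_head node;
	int status;
	void (*complete)(struct req *req);	/* may re-enter the driver */
};

struct ctrl {
	spinlock_t lock;			/* protects the request queue */
};

/* Caller holds ctrl->lock; it is released only across the callback. */
static void request_done_locked(struct ctrl *ctrl, struct req *req, int status)
{
	req->status = status;
	list_del_init(&req->node);

	spin_unlock(&ctrl->lock);
	req->complete(req);		/* safe: no spinlock held here */
	spin_lock(&ctrl->lock);		/* restore the caller's invariant */
}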
pci_get_device(PCI_VENDOR_ID_AMD, + 0x145c, NULL); + if (pinfo->smbus_dev) { + pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; } else { pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); @@ -250,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) { /* Make sure amd chipset type has already been initialized */ usb_amd_find_chipset_info(); - if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) - return 0; - - dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); - return 1; + if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || + amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { + dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); @@ -412,6 +429,50 @@ void usb_amd_quirk_pll_disable(void) } EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); +static int usb_asmedia_wait_write(struct pci_dev *pdev) +{ + unsigned long retry_count; + unsigned char value; + + for (retry_count = 1000; retry_count > 0; --retry_count) { + + pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value); + + if (value == 0xff) { + dev_err(&pdev->dev, "%s: check_ready ERROR", __func__); + return -EIO; + } + + if ((value & ASMT_CONTROL_WRITE_BIT) == 0) + return 0; + + usleep_range(40, 60); + } + + dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); + return -ETIMEDOUT; +} + +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) +{ + if (usb_asmedia_wait_write(pdev) != 0) + return; + + /* send command and address to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); + + if (usb_asmedia_wait_write(pdev) != 0) + return; + + /* send data to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); +} +EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol); + void usb_amd_quirk_pll_enable(void) { usb_amd_quirk_pll(0); @@ -1096,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); + +bool usb_xhci_needs_pci_reset(struct pci_dev *pdev) +{ + /* + * Our dear uPD72020{1,2} friend only partially resets when + * asked to via the XHCI interface, and may end up doing DMA + * at the wrong addresses, as it keeps the top 32bit of some + * addresses from its previous programming under obscure + * circumstances. + * Give it a good whack at probe time. Unfortunately, this + * needs to happen before we've had a chance to discover any + * quirk, or the system will be in a rather bad state.
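usb_asmedia_modifyflowcontrol() above drives a vendor mailbox that lives in PCI config space: poll until the WRITE bit clears, post a command/address pair, poll again, then post the data. The same handshake consolidated into one hypothetical helper (not part of the patch; offsets are the ASMT_* defines above):

static int asmt_mbox_write(struct pci_dev *pdev, u32 addr, u32 data)
{
	if (usb_asmedia_wait_write(pdev))	/* WRITE bit must be clear */
		return -EBUSY;

	/* phase 1: command plus target register address */
	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, addr);
	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);

	if (usb_asmedia_wait_write(pdev))
		return -EBUSY;

	/* phase 2: the payload for that register */
	pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, data);
	pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
	pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);

	return 0;
}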
+ */ + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + (pdev->device == 0x0014 || pdev->device == 0x0015)) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 0222195bd5b0..5582cbafecd4 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -11,13 +11,16 @@ bool usb_amd_prefetch_quirk(void); void usb_amd_dev_put(void); void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); +bool usb_xhci_needs_pci_reset(struct pci_dev *pdev); #else struct pci_dev; static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} +static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} static inline void usb_amd_dev_put(void) {} static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} static inline void sb800_prefetch(struct device *dev, int on) {} diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 0dde49c35dd2..00ad677d8ec7 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -603,12 +603,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, /* Disable all Device Slots */ xhci_dbg(xhci, "Disable all slots\n"); + spin_unlock_irqrestore(&xhci->lock, *flags); for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { retval = xhci_disable_slot(xhci, NULL, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", i, retval); } + spin_lock_irqsave(&xhci->lock, *flags); /* Put all ports to the Disable state by clear PP */ xhci_dbg(xhci, "Disable all port (PP = 0)\n"); /* Power off USB3 ports*/ @@ -897,6 +899,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, clear_bit(wIndex, &bus_state->resuming_ports); set_bit(wIndex, &bus_state->rexit_ports); + + xhci_test_and_clear_bit(xhci, port_array, wIndex, + PORT_PLC); xhci_set_link_state(xhci, port_array, wIndex, XDEV_U0); @@ -1461,6 +1466,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd) t2 |= PORT_WKOC_E | PORT_WKCONN_E; t2 &= ~PORT_WKDISC_E; } + if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) && + (hcd->speed < HCD_USB3)) + t2 &= ~PORT_WAKE_BITS; } else t2 &= ~PORT_WAKE_BITS; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 1bcf971141c0..cefa223f9f08 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -54,6 +54,13 @@ #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 #define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 +#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba +#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb +#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc + +#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 + static const char hcd_name[] = "xhci_hcd"; static struct hc_driver __read_mostly xhci_pci_hc_driver; @@ -135,6 +142,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_AMD) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if ((pdev->vendor == PCI_VENDOR_ID_AMD) && + ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) || + (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1))) + 
xhci->quirks |= XHCI_U2_DISABLE_WAKE; + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { xhci->quirks |= XHCI_LPM_SUPPORT; xhci->quirks |= XHCI_INTEL_HOST; @@ -205,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL; + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; @@ -267,6 +285,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) driver = (struct hc_driver *)id->driver_data; + /* For some HW implementations, an XHCI reset is just not enough... */ + if (usb_xhci_needs_pci_reset(dev)) { + dev_info(&dev->dev, "Resetting\n"); + if (pci_reset_function_locked(dev)) + dev_warn(&dev->dev, "Reset failed"); + } + /* Prevent runtime suspending between USB-2 and USB-3 initialization */ pm_runtime_get_noresume(&dev->dev); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 03f63f50afb6..bbb1536ce86b 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -845,13 +845,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, (ep->ep_state & EP_GETTING_NO_STREAMS)) { int stream_id; - for (stream_id = 0; stream_id < ep->stream_info->num_streams; + for (stream_id = 1; stream_id < ep->stream_info->num_streams; stream_id++) { + ring = ep->stream_info->stream_rings[stream_id]; + if (!ring) + continue; + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Killing URBs for slot ID %u, ep index %u, stream %u", - slot_id, ep_index, stream_id + 1); - xhci_kill_ring_urbs(xhci, - ep->stream_info->stream_rings[stream_id]); + slot_id, ep_index, stream_id); + xhci_kill_ring_urbs(xhci, ring); } } else { ring = ep->ring; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 30f47d92a610..dd29dda6479b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci) if (ret) return ret; + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Wait for controller to be ready for doorbell rings"); /* @@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd) if (!command) return -ENOMEM; - xhci_queue_vendor_command(xhci, command, 0, 0, 0, + ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, TRB_TYPE(TRB_NEC_GET_FW)); + if (ret) + xhci_free_command(xhci, command); } xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB2 roothub"); @@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) compliance_mode_recovery_timer_init(xhci); + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); + /* Re-enable port polling.
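The AMD Promontory and ASMedia 1042A detection above follows the usual xHCI convention of one quirk bit per workaround, set once in xhci_pci_quirks() and tested on the affected path (the bit definitions appear in the xhci.h hunk below). A standalone sketch of the pattern, with the real vendor/device IDs but hypothetical names:

#include <stdio.h>

#define QUIRK_U2_DISABLE_WAKE	(1u << 27)	/* mirrors XHCI_U2_DISABLE_WAKE */
#define QUIRK_ASM_FLOWCONTROL	(1u << 28)	/* mirrors XHCI_ASMEDIA_MODIFY_FLOWCONTROL */

static unsigned detect_quirks(unsigned vendor, unsigned device)
{
	unsigned quirks = 0;

	if (vendor == 0x1022 &&			/* PCI_VENDOR_ID_AMD */
	    (device == 0x43b9 || device == 0x43ba ||
	     device == 0x43bb || device == 0x43bc))
		quirks |= QUIRK_U2_DISABLE_WAKE;

	if (vendor == 0x1b21 && device == 0x1142)	/* ASMedia 1042A */
		quirks |= QUIRK_ASM_FLOWCONTROL;

	return quirks;
}

int main(void)
{
	printf("%#x\n", detect_quirks(0x1022, 0x43b9));	/* prints 0x8000000 */
	return 0;
}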
*/ xhci_dbg(xhci, "%s: starting port polling.\n", __func__); set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 73a28a986d5e..cca87fdbec2c 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1819,6 +1819,8 @@ struct xhci_hcd { /* For controller with a broken Port Disable implementation */ #define XHCI_BROKEN_PORT_PED (1 << 25) #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) +#define XHCI_U2_DISABLE_WAKE (1 << 27) +#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index dbe617a735d8..8bb57f76829d 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) "Could not flush host TX%d fifo: csr: %04x\n", ep->epnum, csr)) return; + mdelay(1); } } diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 623c51300393..f0ce304c5aaf 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev) struct usbhs_priv *priv = dev_get_drvdata(dev); struct platform_device *pdev = usbhs_priv_to_pdev(priv); - if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) + if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) { usbhsc_power_ctrl(priv, 1); + usbhs_mod_autonomy_mode(priv); + } usbhs_platform_call(priv, phy_reset, pdev); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 5bc7a6138855..93fba9033b00 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -37,6 +37,7 @@ struct usbhsg_gpriv; struct usbhsg_uep { struct usb_ep ep; struct usbhs_pipe *pipe; + spinlock_t lock; /* protect the pipe */ char ep_name[EP_NAME_SIZE]; @@ -636,10 +637,16 @@ static int usbhsg_ep_enable(struct usb_ep *ep, static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhs_pipe *pipe; + unsigned long flags; + int ret = 0; - if (!pipe) - return -EINVAL; + spin_lock_irqsave(&uep->lock, flags); + pipe = usbhsg_uep_to_pipe(uep); + if (!pipe) { + ret = -EINVAL; + goto out; + } usbhsg_pipe_disable(uep); usbhs_pipe_free(pipe); @@ -647,6 +654,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep) uep->pipe->mod_private = NULL; uep->pipe = NULL; +out: + spin_unlock_irqrestore(&uep->lock, flags); + return 0; } @@ -696,8 +706,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhs_pipe *pipe; + unsigned long flags; + spin_lock_irqsave(&uep->lock, flags); + pipe = usbhsg_uep_to_pipe(uep); if (pipe) usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); @@ -706,6 +719,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) * even if the pipe is NULL. 
*/ usbhsg_queue_pop(uep, ureq, -ECONNRESET); + spin_unlock_irqrestore(&uep->lock, flags); return 0; } @@ -852,10 +866,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); - struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); + struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; - int ret = 0; + int ret = 0, i; /******************** spin lock ********************/ usbhs_lock(priv, flags); @@ -887,7 +901,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) usbhs_sys_set_test_mode(priv, 0); usbhs_sys_function_ctrl(priv, 0); - usbhsg_ep_disable(&dcp->ep); + /* disable all eps */ + usbhsg_for_each_uep_with_dcp(uep, gpriv, i) + usbhsg_ep_disable(&uep->ep); dev_dbg(dev, "stop gadget\n"); @@ -1069,6 +1085,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } + spin_lock_init(&uep->lock); gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c index d544b331c9f2..02b67abfc2a1 100644 --- a/drivers/usb/renesas_usbhs/rcar3.c +++ b/drivers/usb/renesas_usbhs/rcar3.c @@ -20,9 +20,13 @@ /* Low Power Status register (LPSTS) */ #define LPSTS_SUSPM 0x4000 -/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */ +/* + * USB General control register 2 (UGCTRL2) + * Remarks: bit[31:11] and bit[9:6] should be 0 + */ #define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */ #define UGCTRL2_USB0SEL_OTG 0x00000030 +#define UGCTRL2_VBUSSEL 0x00000400 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data) { @@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev, { struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev); - usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG); + usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG | + UGCTRL2_VBUSSEL); if (enable) { usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 0c55e7f64269..2d945c9f975c 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -141,6 +141,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3bf61acfc26b..fe123153b1a5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&four_g_w100_blacklist }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 
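The mod_gadget changes above introduce a per-endpoint spinlock so uep->pipe cannot be freed underneath a concurrent dequeue: both usbhsg_ep_disable() and usbhsg_ep_dequeue() now sample and clear the pipe pointer only while holding uep->lock. The scheme, reduced to a sketch (types trimmed, pipe_free() hypothetical):

#include <linux/spinlock.h>

struct uep {
	spinlock_t lock;		/* protects @pipe */
	struct pipe *pipe;
};

static int uep_disable(struct uep *uep)
{
	unsigned long flags;
	struct pipe *pipe;
	int ret = 0;

	spin_lock_irqsave(&uep->lock, flags);
	pipe = uep->pipe;
	if (!pipe) {
		ret = -EINVAL;		/* already disabled */
		goto out;
	}
	pipe_free(pipe);
	uep->pipe = NULL;		/* a racing dequeue now sees no pipe */
out:
	spin_unlock_irqrestore(&uep->lock, flags);
	return ret;
}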
0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, @@ -2021,6 +2025,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index c9ebefd8f35f..a585b477415d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, + { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485), + .driver_info = PL2303_QUIRK_ENDPOINT_HACK }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) }, { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) }, { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) }, diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 09d9be88209e..3b5a15d1dc0d 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -27,6 +27,7 @@ #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 #define ATEN_PRODUCT_ID 0x2008 +#define ATEN_PRODUCT_UC485 0x2021 #define ATEN_PRODUCT_ID2 0x2118 #define IODATA_VENDOR_ID 0x04bb diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index fd509ed6cf70..652b4334b26d 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index fba4005dd737..6a7720e66595 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) /* Make sure driver was initialized */ - if (us->extra == NULL) + if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); + srb->result = DID_ERROR << 16; + return; + } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index cbea9f329e71..cde115359793 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, /* Reported-by: Benjamin Tissoires <benjamin.tissoires@xxxxxxxxxx> */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 
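The LONGCHEER and D-Link entries above attach a blacklist through driver_info so the option driver leaves the listed interface to the network driver instead of binding a ttyUSB to it. Sketch of the shape of such an entry (the struct layout follows option.c's reserved-interface bitmask convention of this era; treat it as illustrative):

static const struct option_blacklist_info net_intf4_blacklist = {
	.reserved = BIT(4),	/* interface 4 carries the network function */
};

static const struct usb_device_id ids[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),	/* DWM-222 */
	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
	{ }	/* terminating entry */
};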
0x9999, "Initio Corporation", - "", + "INIC-3069", USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_NO_ATA_1X), + US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE), /* Reported-by: Tom Arild Naess <tanaess@xxxxxxxxx> */ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999, diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 06615934fed1..0dceb9fa3a06 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us) { struct us_data *us = (struct us_data *)__us; struct Scsi_Host *host = us_to_host(us); + struct scsi_cmnd *srb; for (;;) { usb_stor_dbg(us, "*** thread sleeping\n"); @@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us) scsi_lock(host); /* When we are called with no command pending, we're done */ + srb = us->srb; if (us->srb == NULL) { scsi_unlock(host); mutex_unlock(&us->dev_mutex); @@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us) /* lock access to the state */ scsi_lock(host); - /* indicate that the command is done */ - if (us->srb->result != DID_ABORT << 16) { - usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", - us->srb->result); - us->srb->scsi_done(us->srb); - } else { + /* was the command aborted? */ + if (us->srb->result == DID_ABORT << 16) { SkipForAbort: usb_stor_dbg(us, "scsi command aborted\n"); + srb = NULL; /* Don't call srb->scsi_done() */ } /* @@ -429,6 +428,13 @@ static int usb_stor_control_thread(void * __us) /* unlock the device pointers */ mutex_unlock(&us->dev_mutex); + + /* now that the locks are released, notify the SCSI core */ + if (srb) { + usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", + srb->result); + srb->scsi_done(srb); + } } /* for (;;) */ /* Wait until we are told to stop */ diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 44ab43fc4fcc..af10f7b131a4 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) kmem_cache_free(stub_priv_cache, priv); kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + kfree(urb->setup_packet); + urb->setup_packet = NULL; + usb_free_urb(urb); } } diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index 6b1e8c3f0e4b..be50cef645d8 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv) struct urb *urb = priv->urb; kfree(urb->setup_packet); + urb->setup_packet = NULL; + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + list_del(&priv->list); kmem_cache_free(stub_priv_cache, priv); usb_free_urb(urb); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 561084ab387f..51ee9a0c4c7d 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group) kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } +struct vfio_group_put_work { + struct work_struct work; + struct vfio_group *group; +}; + +static void vfio_group_put_bg(struct work_struct *work) +{ + struct vfio_group_put_work *do_work; + + do_work = container_of(work, struct vfio_group_put_work, work); + + vfio_group_put(do_work->group); + kfree(do_work); +} + +static void vfio_group_schedule_put(struct vfio_group *group) +{ + struct vfio_group_put_work *do_work; + + do_work = kmalloc(sizeof(*do_work), GFP_KERNEL); + if (WARN_ON(!do_work)) + return; + + INIT_WORK(&do_work->work, vfio_group_put_bg); + do_work->group = 
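The usb-storage control-thread rework above exists because srb->scsi_done() may immediately dispatch the next command into the driver: the thread now samples us->srb under the host lock, drops the completion for aborted commands, and calls scsi_done() only after both scsi_lock() and dev_mutex are released. The pattern, condensed into a sketch (locals as in the hunk):

scsi_lock(host);
srb = us->srb;				/* sample while locked */
if (srb && srb->result == DID_ABORT << 16)
	srb = NULL;			/* aborted: completed elsewhere */
scsi_unlock(host);
mutex_unlock(&us->dev_mutex);

if (srb)
	srb->scsi_done(srb);		/* may re-enter; no locks held here */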
group; + schedule_work(&do_work->work); +} + /* Assume group_lock or group reference is held */ static void vfio_group_get(struct vfio_group *group) { @@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, break; } - vfio_group_put(group); + /* + * If we're the last reference to the group, the group will be + * released, which includes unregistering the iommu group notifier. + * We hold a read-lock on that notifier list, unregistering needs + * a write-lock... deadlock. Release our reference asynchronously + * to avoid that situation. + */ + vfio_group_schedule_put(group); return NOTIFY_OK; } @@ -1140,15 +1175,11 @@ static long vfio_fops_unl_ioctl(struct file *filep, ret = vfio_ioctl_set_iommu(container, arg); break; default: - down_read(&container->group_lock); - driver = container->iommu_driver; data = container->iommu_data; if (driver) /* passthrough all unrecognized ioctls */ ret = driver->ops->ioctl(data, cmd, arg); - - up_read(&container->group_lock); } return ret; @@ -1202,15 +1233,11 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf, struct vfio_iommu_driver *driver; ssize_t ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->read)) ret = driver->ops->read(container->iommu_data, buf, count, ppos); - up_read(&container->group_lock); - return ret; } @@ -1221,15 +1248,11 @@ static ssize_t vfio_fops_write(struct file *filep, const char __user *buf, struct vfio_iommu_driver *driver; ssize_t ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->write)) ret = driver->ops->write(container->iommu_data, buf, count, ppos); - up_read(&container->group_lock); - return ret; } @@ -1239,14 +1262,10 @@ static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma) struct vfio_iommu_driver *driver; int ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->mmap)) ret = driver->ops->mmap(container->iommu_data, vma); - up_read(&container->group_lock); - return ret; } @@ -1741,6 +1760,15 @@ void vfio_group_put_external_user(struct vfio_group *group) } EXPORT_SYMBOL_GPL(vfio_group_put_external_user); +bool vfio_external_group_match_file(struct vfio_group *test_group, + struct file *filep) +{ + struct vfio_group *group = filep->private_data; + + return (filep->f_op == &vfio_group_fops) && (group == test_group); +} +EXPORT_SYMBOL_GPL(vfio_external_group_match_file); + int vfio_external_user_iommu_id(struct vfio_group *group) { return iommu_group_id(group->iommu_group); @@ -1949,8 +1977,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, goto err_pin_pages; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->pin_pages)) ret = driver->ops->pin_pages(container->iommu_data, user_pfn, @@ -1958,7 +1984,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); err_pin_pages: @@ -1998,8 +2023,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) goto err_unpin_pages; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->unpin_pages)) ret = driver->ops->unpin_pages(container->iommu_data, user_pfn, @@ -2007,7 +2030,6 
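vfio_group_schedule_put() defers the final reference drop to a workqueue because, as the notifier comment below explains, releasing the group unregisters the iommu notifier while the notifier chain's read-lock is still held. The same "release later, from neutral context" shape works for any refcounted object; a generic sketch (obj and obj_put hypothetical):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj;				/* some refcounted object */
void obj_put(struct obj *obj);		/* may take locks we hold right now */

struct obj_put_work {
	struct work_struct work;
	struct obj *obj;
};

static void obj_put_bg(struct work_struct *work)
{
	struct obj_put_work *w = container_of(work, struct obj_put_work, work);

	obj_put(w->obj);		/* runs from the workqueue: no deadlock */
	kfree(w);
}

static void obj_schedule_put(struct obj *obj)
{
	struct obj_put_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (WARN_ON(!w))
		return;			/* better to leak than to deadlock */

	INIT_WORK(&w->work, obj_put_bg);
	w->obj = obj;
	schedule_work(&w->work);
}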
@@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); err_unpin_pages: @@ -2029,8 +2051,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, return -EINVAL; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->register_notifier)) ret = driver->ops->register_notifier(container->iommu_data, @@ -2038,7 +2058,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2056,8 +2075,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, return -EINVAL; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->unregister_notifier)) ret = driver->ops->unregister_notifier(container->iommu_data, @@ -2065,7 +2082,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2083,7 +2099,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, unsigned long *events, struct notifier_block *nb) { - struct vfio_container *container; int ret; bool set_kvm = false; @@ -2101,9 +2116,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, if (ret) return -EINVAL; - container = group->container; - down_read(&container->group_lock); - ret = blocking_notifier_chain_register(&group->notifier, nb); /* @@ -2114,7 +2126,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, blocking_notifier_call_chain(&group->notifier, VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2123,19 +2134,14 @@ static int vfio_register_group_notifier(struct vfio_group *group, static int vfio_unregister_group_notifier(struct vfio_group *group, struct notifier_block *nb) { - struct vfio_container *container; int ret; ret = vfio_group_add_container_user(group); if (ret) return -EINVAL; - container = group->container; - down_read(&container->group_lock); - ret = blocking_notifier_chain_unregister(&group->notifier, nb); - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 007a4f366086..1c4797e53f68 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, { struct virtio_pci_device *vp_dev = to_vp_device(vdev); const char *name = dev_name(&vp_dev->vdev.dev); + unsigned flags = PCI_IRQ_MSIX; unsigned i, v; int err = -ENOMEM; @@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, GFP_KERNEL)) goto error; + if (desc) { + flags |= PCI_IRQ_AFFINITY; + desc->pre_vectors++; /* virtio config vector */ + } + err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors, - nvectors, PCI_IRQ_MSIX | - (desc ? 
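vfio_external_group_match_file(), added a little earlier in this file, lets an external user such as KVM's vfio device prove that a file descriptor really denotes a given vfio group: checking f_op first establishes that the file belongs to this driver at all, which is what makes dereferencing private_data safe. The idiom in general form (a sketch; my_fops hypothetical):

/* Only trust private_data after proving the file is one of ours. */
static bool is_my_file(struct file *filep, const void *expected)
{
	if (filep->f_op != &my_fops)	/* foreign file: private_data is opaque */
		return false;

	return filep->private_data == expected;
}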
PCI_IRQ_AFFINITY : 0), - desc); + nvectors, flags, desc); if (err < 0) goto error; vp_dev->msix_enabled = 1; diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c index 4da69dbf7dca..1bdd02a6d6ac 100644 --- a/drivers/xen/biomerge.c +++ b/drivers/xen/biomerge.c @@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page)); unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page)); - return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && - ((bfn1 == bfn2) || ((bfn1+1) == bfn2)); + return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; #else /* * XXX: Add support for merging bio_vec when using different page diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index d6950e0802b7..980f32817305 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -134,9 +134,7 @@ struct vscsibk_pend { struct page *pages[VSCSI_MAX_GRANTS]; struct se_cmd se_cmd; -}; -struct scsiback_tmr { atomic_t tmr_complete; wait_queue_head_t tmr_wait; }; @@ -599,26 +597,20 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req, struct scsiback_tpg *tpg = pending_req->v2p->tpg; struct scsiback_nexus *nexus = tpg->tpg_nexus; struct se_cmd *se_cmd = &pending_req->se_cmd; - struct scsiback_tmr *tmr; u64 unpacked_lun = pending_req->v2p->lun; int rc, err = FAILED; - tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL); - if (!tmr) { - target_put_sess_cmd(se_cmd); - goto err; - } - - init_waitqueue_head(&tmr->tmr_wait); + init_waitqueue_head(&pending_req->tmr_wait); rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess, &pending_req->sense_buffer[0], - unpacked_lun, tmr, act, GFP_KERNEL, + unpacked_lun, NULL, act, GFP_KERNEL, tag, TARGET_SCF_ACK_KREF); if (rc) goto err; - wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete)); + wait_event(pending_req->tmr_wait, + atomic_read(&pending_req->tmr_complete)); err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ? 
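The virtio change just above routes the config interrupt out of automatic affinity spreading by bumping desc->pre_vectors, so only the per-queue vectors get distributed across CPUs. A sketch of the usage (pci_alloc_irq_vectors_affinity() is the real API; surrounding variables assumed):

struct irq_affinity desc = { .pre_vectors = 1 };  /* vector 0: config, not spread */
int nvecs;

nvecs = pci_alloc_irq_vectors_affinity(pdev, nvectors, nvectors,
					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					&desc);
if (nvecs < 0)
	return nvecs;		/* min == max, so allocation is all-or-nothing */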
SUCCESS : FAILED; @@ -626,9 +618,8 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req, scsiback_do_resp_with_sense(NULL, err, 0, pending_req); transport_generic_free_cmd(&pending_req->se_cmd, 1); return; + err: - if (tmr) - kfree(tmr); scsiback_do_resp_with_sense(NULL, err, 0, pending_req); } @@ -1389,12 +1380,6 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd) static void scsiback_release_cmd(struct se_cmd *se_cmd) { struct se_session *se_sess = se_cmd->se_sess; - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - - if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { - struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr; - kfree(tmr); - } percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); } @@ -1455,11 +1440,11 @@ static int scsiback_queue_status(struct se_cmd *se_cmd) static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd) { - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr; + struct vscsibk_pend *pending_req = container_of(se_cmd, + struct vscsibk_pend, se_cmd); - atomic_set(&tmr->tmr_complete, 1); - wake_up(&tmr->tmr_wait); + atomic_set(&pending_req->tmr_complete, 1); + wake_up(&pending_req->tmr_wait); } static void scsiback_aborted_task(struct se_cmd *se_cmd) diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index 856ada5d39c9..5b081a01779d 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -299,17 +299,7 @@ static int process_msg(void) mutex_lock(&xb_write_mutex); list_for_each_entry(req, &xs_reply_list, list) { if (req->msg.req_id == state.msg.req_id) { - if (req->state == xb_req_state_wait_reply) { - req->msg.type = state.msg.type; - req->msg.len = state.msg.len; - req->body = state.body; - req->state = xb_req_state_got_reply; - list_del(&req->list); - req->cb(req); - } else { - list_del(&req->list); - kfree(req); - } + list_del(&req->list); err = 0; break; } @@ -317,6 +307,15 @@ static int process_msg(void) mutex_unlock(&xb_write_mutex); if (err) goto out; + + if (req->state == xb_req_state_wait_reply) { + req->msg.type = state.msg.type; + req->msg.len = state.msg.len; + req->body = state.body; + req->state = xb_req_state_got_reply; + req->cb(req); + } else + kfree(req); } mutex_unlock(&xs_response_mutex); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 5075fd5c62c8..9867eda73769 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -666,8 +666,7 @@ static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned long random_variable = 0; - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) { + if (current->flags & PF_RANDOMIZE) { random_variable = get_random_long(); random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; @@ -927,17 +926,60 @@ static int load_elf_binary(struct linux_binprm *bprm) elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; + /* + * If we are loading ET_EXEC or we have already performed + * the ET_DYN load_addr calculations, proceed normally. + */ if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (loc->elf_ex.e_type == ET_DYN) { - /* Try and get dynamic programs out of the way of the - * default mmap base, as well as whatever program they - * might try to exec. This is because the brk will - * follow the loader, and is not movable. 
*/ - load_bias = ELF_ET_DYN_BASE - vaddr; - if (current->flags & PF_RANDOMIZE) - load_bias += arch_mmap_rnd(); - load_bias = ELF_PAGESTART(load_bias); + /* + * This logic is run once for the first LOAD Program + * Header for ET_DYN binaries to calculate the + * randomization (load_bias) for all the LOAD + * Program Headers, and to calculate the entire + * size of the ELF mapping (total_size). (Note that + * load_addr_set is set to true later once the + * initial mapping is performed.) + * + * There are effectively two types of ET_DYN + * binaries: programs (i.e. PIE: ET_DYN with INTERP) + * and loaders (ET_DYN without INTERP, since they + * _are_ the ELF interpreter). The loaders must + * be loaded away from programs since the program + * may otherwise collide with the loader (especially + * for ET_EXEC which does not have a randomized + * position). For example to handle invocations of + * "./ld.so someprog" to test out a new version of + * the loader, the subsequent program that the + * loader loads must avoid the loader itself, so + * they cannot share the same load range. Sufficient + * room for the brk must be allocated with the + * loader as well, since brk must be available with + * the loader. + * + * Therefore, programs are loaded offset from + * ELF_ET_DYN_BASE and loaders are loaded into the + * independently randomized mmap region (0 load_bias + * without MAP_FIXED). + */ + if (elf_interpreter) { + load_bias = ELF_ET_DYN_BASE; + if (current->flags & PF_RANDOMIZE) + load_bias += arch_mmap_rnd(); + elf_flags |= MAP_FIXED; + } else + load_bias = 0; + + /* + * Since load_bias is used for all subsequent loading + * calculations, we must lower it by the first vaddr + * so that the remaining calculations based on the + * ELF vaddrs will be correctly offset. The result + * is then page aligned. 
+ */ + load_bias = ELF_PAGESTART(load_bias - vaddr); + total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum); if (!total_size) { diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 247b8dfaf6e5..8d8370ddb6b2 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -78,12 +78,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, switch (type) { case ACL_TYPE_ACCESS: name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) { - ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (ret) - return ret; - } - ret = 0; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) @@ -119,6 +113,13 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { + int ret; + + if (type == ACL_TYPE_ACCESS && acl) { + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (ret) + return ret; + } return __btrfs_set_acl(NULL, inode, acl, type); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5f678dcb20e6..db58d06c4e75 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1255,9 +1255,9 @@ void clean_tree_block(struct btrfs_fs_info *fs_info, btrfs_assert_tree_locked(buf); if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) { - __percpu_counter_add(&fs_info->dirty_metadata_bytes, - -buf->len, - fs_info->dirty_metadata_batch); + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, + -buf->len, + fs_info->dirty_metadata_batch); /* ugh, clear_extent_buffer_dirty needs to lock the page */ btrfs_set_lock_blocking(buf); clear_extent_buffer_dirty(buf); @@ -4049,9 +4049,9 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf) buf->start, transid, fs_info->generation); was_dirty = set_extent_buffer_dirty(buf); if (!was_dirty) - __percpu_counter_add(&fs_info->dirty_metadata_bytes, - buf->len, - fs_info->dirty_metadata_batch); + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, + buf->len, + fs_info->dirty_metadata_batch); #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) { btrfs_print_leaf(fs_info, buf); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 33d979e9ea2a..83eecd33ad96 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4776,10 +4776,6 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, else flush = BTRFS_RESERVE_NO_FLUSH; spin_lock(&space_info->lock); - if (can_overcommit(root, space_info, orig, flush)) { - spin_unlock(&space_info->lock); - break; - } if (list_empty(&space_info->tickets) && list_empty(&space_info->priority_tickets)) { spin_unlock(&space_info->lock); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d3619e010005..0c7b61f72478 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3597,9 +3597,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb, set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); spin_unlock(&eb->refs_lock); btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); - __percpu_counter_add(&fs_info->dirty_metadata_bytes, - -eb->len, - fs_info->dirty_metadata_batch); + percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, + -eb->len, + fs_info->dirty_metadata_batch); ret = 1; } else { spin_unlock(&eb->refs_lock); @@ -4463,29 +4463,25 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo, } /* - * Sanity check for fiemap cache + * Emit last fiemap cache * - * All fiemap cache should be submitted by emit_fiemap_extent() - * Iteration should be terminated either by last fiemap extent or - * 
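The rewritten ET_DYN path above distinguishes PIEs (ET_DYN with an interpreter) from the loaders themselves (ET_DYN without one): the former are pinned near ELF_ET_DYN_BASE with MAP_FIXED plus randomization, the latter keep load_bias 0 and land wherever randomized mmap puts them. Either way the bias must be expressed relative to the first segment's p_vaddr and page-aligned, which is what ELF_PAGESTART(load_bias - vaddr) computes. A standalone sketch of the arithmetic (addresses made up):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ELF_PAGESTART(v) ((v) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long base  = 0x555555000000UL; /* assumed ELF_ET_DYN_BASE + rnd */
	unsigned long vaddr = 0x1240UL;	        /* first PT_LOAD p_vaddr */
	unsigned long bias  = ELF_PAGESTART(base - vaddr);

	/* every PT_LOAD segment then maps at bias + p_vaddr */
	printf("bias=%#lx, first segment at %#lx\n", bias, bias + vaddr);
	return 0;
}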
fieinfo->fi_extents_max. - * So no cached fiemap should exist. + * The last fiemap cache may still be cached in the following case: + * 0 4k 8k + * |<- Fiemap range ->| + * |<------------ First extent ----------->| + * + * In this case, the first extent range will be cached but not emitted. + * So we must emit it before ending extent_fiemap(). */ -static int check_fiemap_cache(struct btrfs_fs_info *fs_info, - struct fiemap_extent_info *fieinfo, - struct fiemap_cache *cache) +static int emit_last_fiemap_cache(struct btrfs_fs_info *fs_info, + struct fiemap_extent_info *fieinfo, + struct fiemap_cache *cache) { int ret; if (!cache->cached) return 0; - /* Small and recoverbale problem, only to info developer */ -#ifdef CONFIG_BTRFS_DEBUG - WARN_ON(1); -#endif - btrfs_warn(fs_info, - "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x", - cache->offset, cache->phys, cache->len, cache->flags); ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, cache->len, cache->flags); cache->cached = false; @@ -4701,7 +4697,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } out_free: if (!ret) - ret = check_fiemap_cache(root->fs_info, fieinfo, &cache); + ret = emit_last_fiemap_cache(root->fs_info, fieinfo, &cache); free_extent_map(em); out: btrfs_free_path(path); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index da1096eb1a40..5da85b080368 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -2390,10 +2390,13 @@ static int fill_holes(struct btrfs_trans_handle *trans, */ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) { + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em; int ret = 0; - em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, + round_down(*start, fs_info->sectorsize), + round_up(*len, fs_info->sectorsize), 0); if (IS_ERR(em)) return PTR_ERR(em); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ef3c98c527c1..a760b8d8e39c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1766,8 +1766,8 @@ static void btrfs_set_bit_hook(struct inode *inode, if (btrfs_is_testing(fs_info)) return; - __percpu_counter_add(&fs_info->delalloc_bytes, len, - fs_info->delalloc_batch); + percpu_counter_add_batch(&fs_info->delalloc_bytes, len, + fs_info->delalloc_batch); spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->delalloc_bytes += len; if (*bits & EXTENT_DEFRAG) @@ -1840,8 +1840,8 @@ static void btrfs_clear_bit_hook(struct btrfs_inode *inode, &inode->vfs_inode, state->start, len); - __percpu_counter_add(&fs_info->delalloc_bytes, -len, - fs_info->delalloc_batch); + percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, + fs_info->delalloc_batch); spin_lock(&inode->lock); inode->delalloc_bytes -= len; if (do_list && inode->delalloc_bytes == 0 && diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index fc496a6f842a..17557b952846 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5190,15 +5190,12 @@ static int is_extent_unchanged(struct send_ctx *sctx, goto out; } - right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); if (right_type == BTRFS_FILE_EXTENT_INLINE) { right_len = btrfs_file_extent_inline_len(eb, slot, ei); right_len = PAGE_ALIGN(right_len); } else { right_len = btrfs_file_extent_num_bytes(eb, ei); } - right_offset = btrfs_file_extent_offset(eb, ei); - right_gen = btrfs_file_extent_generation(eb, ei); /* * Are we at extent 8? If yes, we know the extent is changed. 
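find_first_non_hole() above now hands btrfs_get_extent() a sector-aligned range, rounding the start down and the length up to fs_info->sectorsize, so an unaligned hole-punch offset cannot produce a lookup that stops short of a sector boundary. The kernel's rounding macros for power-of-two sizes, demonstrated standalone:

#include <stdio.h>

#define round_down(x, y)	((x) & ~((y) - 1))
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long start = 5000, len = 3000, sectorsize = 4096;

	printf("start=%lu len=%lu\n",
	       round_down(start, sectorsize),	/* 4096 */
	       round_up(len, sectorsize));	/* 4096 */
	return 0;
}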
@@ -5223,6 +5220,10 @@ static int is_extent_unchanged(struct send_ctx *sctx, goto out; } + right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); + right_offset = btrfs_file_extent_offset(eb, ei); + right_gen = btrfs_file_extent_generation(eb, ei); + left_offset_fixed = left_offset; if (key.offset < ekey->offset) { /* Fix the right offset for 2a and 7. */ diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index e071d23f6148..ef7240ace576 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -271,6 +271,11 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, if (ret < 0) err = ret; dput(last); + /* last_name no longer match cache index */ + if (fi->readdir_cache_idx >= 0) { + fi->readdir_cache_idx = -1; + fi->dir_release_count = 0; + } } return err; } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index bcc7d9acad64..fb482515a64b 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -367,6 +367,8 @@ struct smb_version_operations { unsigned int (*calc_smb_size)(void *); /* check for STATUS_PENDING and process it in a positive case */ bool (*is_status_pending)(char *, struct TCP_Server_Info *, int); + /* check for STATUS_NETWORK_SESSION_EXPIRED */ + bool (*is_session_expired)(char *); /* send oplock break response */ int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *, struct cifsInodeInfo *); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index fbb0d4cbda41..72a53bd19865 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1460,6 +1460,13 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) return length; server->total_read += length; + if (server->ops->is_session_expired && + server->ops->is_session_expired(buf)) { + cifs_reconnect(server); + wake_up(&server->response_q); + return -1; + } + if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server, 0)) { cifs_discard_remaining_data(server); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9365c0cf77ad..c59d77f64f74 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -812,6 +812,13 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); + if (server->ops->is_session_expired && + server->ops->is_session_expired(buf)) { + cifs_reconnect(server); + wake_up(&server->response_q); + return -1; + } + if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server, length)) return -1; diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 56366e984076..569d3fb736be 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -194,15 +194,20 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, bool prefix) } /* + * Don't allow path components longer than the server max. * Don't allow the separator character in a path component. * The VFS will not allow "/", but "\" is allowed by posix. 
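The cifs hunks above give every receive path one early test: when the server reports STATUS_NETWORK_SESSION_EXPIRED, the client calls cifs_reconnect() to rebuild the transport and session rather than bubbling the status up to the caller. It is an op in smb_version_operations because only the SMB2+ dialects implement it; the NULL check lets SMB1 skip the hook. The call-site shape (mirrors the hunks above):

if (server->ops->is_session_expired &&
    server->ops->is_session_expired(buf)) {
	cifs_reconnect(server);		/* tear down and re-establish */
	wake_up(&server->response_q);	/* let waiters notice and retry */
	return -1;
}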
*/ static int -check_name(struct dentry *direntry) +check_name(struct dentry *direntry, struct cifs_tcon *tcon) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int i; + if (unlikely(direntry->d_name.len > + tcon->fsAttrInfo.MaxPathNameComponentLength)) + return -ENAMETOOLONG; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) { for (i = 0; i < direntry->d_name.len; i++) { if (direntry->d_name.name[i] == '\\') { @@ -500,10 +505,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, return finish_no_open(file, res); } - rc = check_name(direntry); - if (rc) - return rc; - xid = get_xid(); cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n", @@ -516,6 +517,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, } tcon = tlink_tcon(tlink); + + rc = check_name(direntry, tcon); + if (rc) + goto out_free_xid; + server = tcon->ses->server; if (server->ops->new_lease_key) @@ -776,7 +782,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } pTcon = tlink_tcon(tlink); - rc = check_name(direntry); + rc = check_name(direntry, pTcon); if (rc) goto lookup_out; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 7e48561abd29..67f9b1445043 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1036,6 +1036,18 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server, int length) return true; } +static bool +smb2_is_session_expired(char *buf) +{ + struct smb2_sync_hdr *shdr = get_sync_hdr(buf); + + if (shdr->Status != STATUS_NETWORK_SESSION_EXPIRED) + return false; + + cifs_dbg(FYI, "Session expired\n"); + return true; +} + static int smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid, struct cifsInodeInfo *cinode) @@ -2058,6 +2070,13 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, return -ENOTSUPP; } + if (server->ops->is_session_expired && + server->ops->is_session_expired(buf)) { + cifs_reconnect(server); + wake_up(&server->response_q); + return -1; + } + if (server->ops->is_status_pending && server->ops->is_status_pending(buf, server, 0)) return -1; @@ -2375,6 +2394,7 @@ struct smb_version_operations smb20_operations = { .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, + .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, @@ -2458,6 +2478,7 @@ struct smb_version_operations smb21_operations = { .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, + .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, @@ -2543,6 +2564,7 @@ struct smb_version_operations smb30_operations = { .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, + .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, @@ -2638,6 +2660,7 @@ struct smb_version_operations smb311_operations = { .close_dir = smb2_close_dir, .calc_smb_size = smb2_calc_size, .is_status_pending = smb2_is_status_pending, + .is_session_expired = smb2_is_session_expired, .oplock_response = smb2_oplock_response, .queryfs = smb2_queryfs, .mand_lock = smb2_mand_lock, diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index e4afdaae743f..c398f393f2b3 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ 
-3195,8 +3195,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf, kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) * le32_to_cpu(pfs_inf->SectorsPerAllocationUnit); kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits); - kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits); - kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); + kst->f_bfree = kst->f_bavail = + le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits); return; } diff --git a/fs/dax.c b/fs/dax.c index 9187f3b07f3e..f3ac7674b5cb 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1380,6 +1380,16 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, trace_dax_pmd_fault(inode, vmf, max_pgoff, 0); + /* + * Make sure that the faulting address's PMD offset (color) matches + * the PMD offset from the start of the file. This is necessary so + * that a PMD range in the page table overlaps exactly with a PMD + * range in the radix tree. + */ + if ((vmf->pgoff & PG_PMD_COLOUR) != + ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) + goto fallback; + /* Fall back to PTEs if we're going to COW */ if (write && !(vma->vm_flags & VM_SHARED)) goto fallback; diff --git a/fs/dcache.c b/fs/dcache.c index a9f995f6859e..736754c5ab63 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry) return dentry->d_name.name != dentry->d_iname; } +void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + if (unlikely(dname_external(dentry))) { + struct external_name *p = external_name(dentry); + atomic_inc(&p->u.count); + spin_unlock(&dentry->d_lock); + name->name = p->name; + } else { + memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN); + spin_unlock(&dentry->d_lock); + name->name = name->inline_name; + } +} +EXPORT_SYMBOL(take_dentry_name_snapshot); + +void release_dentry_name_snapshot(struct name_snapshot *name) +{ + if (unlikely(name->name != name->inline_name)) { + struct external_name *p; + p = container_of(name->name, struct external_name, name[0]); + if (unlikely(atomic_dec_and_test(&p->u.count))) + kfree_rcu(p, u.head); + } +} +EXPORT_SYMBOL(release_dentry_name_snapshot); + static inline void __d_set_inode_and_type(struct dentry *dentry, struct inode *inode, unsigned type_flags) @@ -1133,11 +1160,12 @@ void shrink_dcache_sb(struct super_block *sb) LIST_HEAD(dispose); freed = list_lru_walk(&sb->s_dentry_lru, - dentry_lru_isolate_shrink, &dispose, UINT_MAX); + dentry_lru_isolate_shrink, &dispose, 1024); this_cpu_sub(nr_dentry_unused, freed); shrink_dentry_list(&dispose); - } while (freed > 0); + cond_resched(); + } while (list_lru_count(&sb->s_dentry_lru) > 0); } EXPORT_SYMBOL(shrink_dcache_sb); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index e892ae7d89f8..acd3be2cc691 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, { int error; struct dentry *dentry = NULL, *trap; - const char *old_name; + struct name_snapshot old_name; trap = lock_rename(new_dir, old_dir); /* Source or destination directories don't exist? 
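take_dentry_name_snapshot()/release_dentry_name_snapshot() replace the old fsnotify_oldname_init/free pair: they pin a stable copy of d_name across an operation that can rename the dentry, either by taking a reference on the external name or by copying the inline name into the snapshot. Typical usage, as the debugfs_rename conversion around this point adopts (a sketch; do_rename hypothetical):

struct name_snapshot old_name;

take_dentry_name_snapshot(&old_name, old_dentry);
error = do_rename(old_dir, old_dentry, new_dir, new_dentry);
if (!error)
	fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
		      d_is_dir(old_dentry), NULL, old_dentry);
release_dentry_name_snapshot(&old_name);	/* drop ref or discard copy */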
*/ @@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) goto exit; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir), dentry, 0); if (error) { - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); goto exit; } d_move(old_dentry, dentry); - fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name, + fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name, d_is_dir(old_dentry), NULL, old_dentry); - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); unlock_rename(new_dir, old_dir); dput(dentry); return old_dentry; diff --git a/fs/exec.c b/fs/exec.c index 904199086490..62175cbcc801 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -220,8 +220,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; - unsigned long ptr_size; - struct rlimit *rlim; + unsigned long ptr_size, limit; /* * Since the stack will hold pointers to the strings, we @@ -250,14 +249,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, return page; /* - * Limit to 1/4-th the stack size for the argv+env strings. + * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM + * (whichever is smaller) for the argv+env strings. * This ensures that: * - the remaining binfmt code will not run out of stack space, * - the program will have a reasonable amount of stack left * to work from. */ - rlim = current->signal->rlim; - if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + limit = _STK_LIM / 4 * 3; + limit = min(limit, rlimit(RLIMIT_STACK) / 4); + if (size > limit) goto fail; } diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 79dafa71effd..069c0dceda01 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -175,11 +175,8 @@ ext2_get_acl(struct inode *inode, int type) return acl; } -/* - * inode->i_mutex: down - */ -int -ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +static int +__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int name_index; void *value = NULL; @@ -189,13 +186,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) switch(type) { case ACL_TYPE_ACCESS: name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - inode->i_ctime = current_time(inode); - mark_inode_dirty(inode); - } break; case ACL_TYPE_DEFAULT: @@ -221,6 +211,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) return error; } +/* + * inode->i_mutex: down + */ +int +ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + int error; + + if (type == ACL_TYPE_ACCESS && acl) { + error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error) + return error; + inode->i_ctime = current_time(inode); + mark_inode_dirty(inode); + } + return __ext2_set_acl(inode, acl, type); +} + /* * Initialize the ACLs of a new inode. Called from ext2_new_inode. 
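The get_arg_page() change above no longer sizes the argv+env cap purely from RLIMIT_STACK; it clamps the strings to the smaller of a quarter of the stack limit and three quarters of _STK_LIM. A small userspace sketch of the same arithmetic, with _STK_LIM assumed to be the usual 8 MiB default:

#include <stdio.h>
#include <sys/resource.h>

#define _STK_LIM (8UL * 1024 * 1024)	/* assumption: common default */

int main(void)
{
	struct rlimit r;
	unsigned long limit = _STK_LIM / 4 * 3;	/* 6 MiB ceiling */

	if (getrlimit(RLIMIT_STACK, &r) == 0 && r.rlim_cur / 4 < limit)
		limit = r.rlim_cur / 4;
	printf("argv+env strings capped at %lu bytes\n", limit);
	return 0;
}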
* @@ -238,12 +246,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir) return error; if (default_acl) { - error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) - error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); + error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } return error; diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 3ec0e46de95f..22a8d532cca6 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -193,13 +193,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, switch (type) { case ACL_TYPE_ACCESS: name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - inode->i_ctime = current_time(inode); - ext4_mark_inode_dirty(handle, inode); - } break; case ACL_TYPE_DEFAULT: @@ -221,8 +214,9 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type, value, size, 0); kfree(value); - if (!error) + if (!error) { set_cached_acl(inode, type, acl); + } return error; } @@ -232,6 +226,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type) { handle_t *handle; int error, retries = 0; + umode_t mode = inode->i_mode; + int update_mode = 0; error = dquot_initialize(inode); if (error) @@ -242,7 +238,20 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type) if (IS_ERR(handle)) return PTR_ERR(handle); + if ((type == ACL_TYPE_ACCESS) && acl) { + error = posix_acl_update_mode(inode, &mode, &acl); + if (error) + goto out_stop; + update_mode = 1; + } + error = __ext4_set_acl(handle, inode, type, acl); + if (!error && update_mode) { + inode->i_mode = mode; + inode->i_ctime = current_time(inode); + ext4_mark_inode_dirty(handle, inode); + } +out_stop: ext4_journal_stop(handle); if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 02ce7e7bbdf5..407fc5aa32a7 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -521,6 +521,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, lastoff = page_offset(page); bh = head = page_buffers(page); do { + if (lastoff + bh->b_size <= startoff) + goto next; if (buffer_uptodate(bh) || buffer_unwritten(bh)) { if (whence == SEEK_DATA) @@ -535,6 +537,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, unlock_page(page); goto out; } +next: lastoff += bh->b_size; bh = bh->b_this_page; } while (bh != head); diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index c3ed9021b781..035cd3f4785e 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -1927,7 +1927,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count) n_desc_blocks = o_desc_blocks + le16_to_cpu(es->s_reserved_gdt_blocks); n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb); - n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb); + n_blocks_count = (ext4_fsblk_t)n_group * + EXT4_BLOCKS_PER_GROUP(sb); n_group--; /* set to last group number */ } diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index d74dc5f81a04..48c7a7d55ed3 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a, int ret; ret = kstrtoull(skip_spaces(buf), 0, &val); - if (!ret || val >= clusters) + if (ret || val >= clusters) return -EINVAL; atomic64_set(&sbi->s_resv_clusters, val); diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 8f487692c21f..8ac67d547826 100644 --- 
a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -211,7 +211,7 @@ static int __f2fs_set_acl(struct inode *inode, int type, switch (type) { case ACL_TYPE_ACCESS: name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { + if (acl && !ipage) { error = posix_acl_update_mode(inode, &inode->i_mode, &acl); if (error) return error; diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index ea9c317b5916..06f7854d4e52 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1051,8 +1051,9 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) { unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); + unsigned long flags; - spin_lock(&sbi->cp_lock); + spin_lock_irqsave(&sbi->cp_lock, flags); if ((cpc->reason & CP_UMOUNT) && le32_to_cpu(ckpt->cp_pack_total_block_count) > @@ -1083,14 +1084,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* set this flag to activate crc|cp_ver for recovery */ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG); - spin_unlock(&sbi->cp_lock); + spin_unlock_irqrestore(&sbi->cp_lock, flags); } static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); - unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; + unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; block_t start_blk; unsigned int data_sum_blocks, orphan_blocks; __u32 crc32 = 0; @@ -1132,12 +1133,12 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = npages_for_summary_flush(sbi, false); - spin_lock(&sbi->cp_lock); + spin_lock_irqsave(&sbi->cp_lock, flags); if (data_sum_blocks < NR_CURSEG_DATA_TYPE) __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); - spin_unlock(&sbi->cp_lock); + spin_unlock_irqrestore(&sbi->cp_lock, flags); orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num); ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index fd2e651bad6d..99271b76e05d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1228,9 +1228,11 @@ static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) { - spin_lock(&sbi->cp_lock); + unsigned long flags; + + spin_lock_irqsave(&sbi->cp_lock, flags); __set_ckpt_flags(F2FS_CKPT(sbi), f); - spin_unlock(&sbi->cp_lock); + spin_unlock_irqrestore(&sbi->cp_lock, flags); } static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) @@ -1244,22 +1246,26 @@ static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) { - spin_lock(&sbi->cp_lock); + unsigned long flags; + + spin_lock_irqsave(&sbi->cp_lock, flags); __clear_ckpt_flags(F2FS_CKPT(sbi), f); - spin_unlock(&sbi->cp_lock); + spin_unlock_irqrestore(&sbi->cp_lock, flags); } static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) { + unsigned long flags; + set_sbi_flag(sbi, SBI_NEED_FSCK); if (lock) - spin_lock(&sbi->cp_lock); + spin_lock_irqsave(&sbi->cp_lock, flags); __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); kfree(NM_I(sbi)->nat_bits); NM_I(sbi)->nat_bits = NULL; if (lock) - spin_unlock(&sbi->cp_lock); + spin_unlock_irqrestore(&sbi->cp_lock, flags); } static inline bool 
enabled_nat_bits(struct f2fs_sb_info *sbi, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 61af721329fa..666edc49e852 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1493,6 +1493,7 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) inode->i_ctime = current_time(inode); f2fs_set_inode_flags(inode); + f2fs_mark_inode_dirty_sync(inode, false); inode_unlock(inode); out: diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 026522107ca3..3af905ce6c27 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -32,13 +32,14 @@ static int gc_thread_func(void *data) wait_ms = gc_th->min_sleep_time; + set_freezable(); do { + wait_event_interruptible_timeout(*wq, + kthread_should_stop() || freezing(current), + msecs_to_jiffies(wait_ms)); + if (try_to_freeze()) continue; - else - wait_event_interruptible_timeout(*wq, - kthread_should_stop(), - msecs_to_jiffies(wait_ms)); if (kthread_should_stop()) break; diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 518f49643092..e53c784ab11e 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -44,7 +44,6 @@ void f2fs_set_inode_flags(struct inode *inode) new_fl |= S_DIRSYNC; inode_set_flags(inode, new_fl, S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); - f2fs_mark_inode_dirty_sync(inode, false); } static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) @@ -226,6 +225,7 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) ret = -EIO; goto bad_inode; } + f2fs_set_inode_flags(inode); unlock_new_inode(inode); trace_f2fs_iget(inode); return inode; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 96845854e7ee..fca7577d31d1 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -16,6 +16,7 @@ #include <linux/kthread.h> #include <linux/swap.h> #include <linux/timer.h> +#include <linux/freezer.h> #include "f2fs.h" #include "segment.h" @@ -751,7 +752,7 @@ static void f2fs_submit_discard_endio(struct bio *bio) dc->error = bio->bi_error; dc->state = D_DONE; - complete(&dc->wait); + complete_all(&dc->wait); bio_put(bio); } @@ -1060,18 +1061,24 @@ static int issue_discard_thread(void *data) struct f2fs_sb_info *sbi = data; struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; wait_queue_head_t *q = &dcc->discard_wait_queue; -repeat: - if (kthread_should_stop()) - return 0; - __issue_discard_cmd(sbi, true); - __wait_discard_cmd(sbi, true); + set_freezable(); - congestion_wait(BLK_RW_SYNC, HZ/50); + do { + wait_event_interruptible(*q, kthread_should_stop() || + freezing(current) || + atomic_read(&dcc->discard_cmd_cnt)); + if (try_to_freeze()) + continue; + if (kthread_should_stop()) + return 0; - wait_event_interruptible(*q, kthread_should_stop() || - atomic_read(&dcc->discard_cmd_cnt)); - goto repeat; + __issue_discard_cmd(sbi, true); + __wait_discard_cmd(sbi, true); + + congestion_wait(BLK_RW_SYNC, HZ/50); + } while (!kthread_should_stop()); + return 0; } #ifdef CONFIG_BLK_DEV_ZONED @@ -1322,7 +1329,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) sbi->blocks_per_seg, cur_pos); len = next_pos - cur_pos; - if (force && len < cpc->trim_minlen) + if (f2fs_sb_mounted_blkzoned(sbi->sb) || + (force && len < cpc->trim_minlen)) goto skip; f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, @@ -2455,6 +2463,8 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { + struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal; + struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal; int 
type = CURSEG_HOT_DATA; int err; @@ -2481,6 +2491,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) return err; } + /* sanity check for summary blocks */ + if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || + sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) + return -EINVAL; + return 0; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 83355ec4a92c..397b1e816b36 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1521,6 +1521,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned int ovp_segments, reserved_segments; + unsigned int main_segs, blocks_per_seg; + int i; total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); @@ -1542,6 +1544,20 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) return 1; } + main_segs = le32_to_cpu(raw_super->segment_count_main); + blocks_per_seg = sbi->blocks_per_seg; + + for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { + if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs || + le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) + return 1; + } + for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { + if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs || + le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) + return 1; + } + if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 63ee2940775c..309364aab2a5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) struct page *page = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); if (likely(page) && PageDirty(page)) { - __dec_wb_stat(old_wb, WB_RECLAIMABLE); - __inc_wb_stat(new_wb, WB_RECLAIMABLE); + dec_wb_stat(old_wb, WB_RECLAIMABLE); + inc_wb_stat(new_wb, WB_RECLAIMABLE); } } @@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work) &mapping->tree_lock); if (likely(page)) { WARN_ON_ONCE(!PageWriteback(page)); - __dec_wb_stat(old_wb, WB_WRITEBACK); - __inc_wb_stat(new_wb, WB_WRITEBACK); + dec_wb_stat(old_wb, WB_WRITEBACK); + inc_wb_stat(new_wb, WB_WRITEBACK); } } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 3ee4fdc3da9e..76eac2a554c4 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) { struct fuse_file *ff; - ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); + ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL); if (unlikely(!ff)) return NULL; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 959a19ced4d5..dd58a2a95bb6 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = { static struct rhashtable gl_hash_table; -void gfs2_glock_free(struct gfs2_glock *gl) +static void gfs2_glock_dealloc(struct rcu_head *rcu) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); if (gl->gl_ops->go_flags & GLOF_ASPACE) { kmem_cache_free(gfs2_glock_aspace_cachep, gl); @@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl) kfree(gl->gl_lksb.sb_lvbptr); kmem_cache_free(gfs2_glock_cachep, gl); } +} + +void gfs2_glock_free(struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); if (atomic_dec_and_test(&sdp->sd_glock_disposal)) wake_up(&sdp->sd_glock_wait); } diff 
--git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index b7cf65d13561..7550975c39ab 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -374,6 +374,7 @@ struct gfs2_glock { loff_t end; } gl_vm; }; + struct rcu_head gl_rcu; struct rhash_head gl_node; }; diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c index 9b92058a1240..6bb5d7c42888 100644 --- a/fs/hfsplus/posix_acl.c +++ b/fs/hfsplus/posix_acl.c @@ -51,8 +51,8 @@ struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type) return acl; } -int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, - int type) +static int __hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, + int type) { int err; char *xattr_name; @@ -64,12 +64,6 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, switch (type) { case ACL_TYPE_ACCESS: xattr_name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) { - err = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (err) - return err; - } - err = 0; break; case ACL_TYPE_DEFAULT: @@ -105,6 +99,18 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, return err; } +int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + int err; + + if (type == ACL_TYPE_ACCESS && acl) { + err = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (err) + return err; + } + return __hfsplus_set_posix_acl(inode, acl, type); +} + int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir) { int err = 0; @@ -122,15 +128,15 @@ int hfsplus_init_posix_acl(struct inode *inode, struct inode *dir) return err; if (default_acl) { - err = hfsplus_set_posix_acl(inode, default_acl, - ACL_TYPE_DEFAULT); + err = __hfsplus_set_posix_acl(inode, default_acl, + ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!err) - err = hfsplus_set_posix_acl(inode, acl, - ACL_TYPE_ACCESS); + err = __hfsplus_set_posix_acl(inode, acl, + ACL_TYPE_ACCESS); posix_acl_release(acl); } return err; diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 7bc186f4ed4d..1be45c8d460d 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type, switch (type) { case ACL_TYPE_ACCESS: ea_name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) { - rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (rc) - return rc; - inode->i_ctime = current_time(inode); - mark_inode_dirty(inode); - } break; case ACL_TYPE_DEFAULT: ea_name = XATTR_NAME_POSIX_ACL_DEFAULT; @@ -118,9 +111,17 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) tid = txBegin(inode->i_sb, 0); mutex_lock(&JFS_IP(inode)->commit_mutex); + if (type == ACL_TYPE_ACCESS && acl) { + rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (rc) + goto end_tx; + inode->i_ctime = current_time(inode); + mark_inode_dirty(inode); + } rc = __jfs_set_acl(tid, inode, type, acl); if (!rc) rc = txCommit(tid, 1, &inode, 0); +end_tx: txEnd(tid); mutex_unlock(&JFS_IP(inode)->commit_mutex); return rc; diff --git a/fs/mount.h b/fs/mount.h index bf1fda6eed8f..de45d9e76748 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -58,6 +58,7 @@ struct mount { struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ + struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; diff --git a/fs/namei.c 
b/fs/namei.c index 6571a5f5112e..281c1f7fa983 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -4362,11 +4362,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, { int error; bool is_dir = d_is_dir(old_dentry); - const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; + struct name_snapshot old_name; if (source == target) return 0; @@ -4413,7 +4413,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (error) return error; - old_name = fsnotify_oldname_init(old_dentry->d_name.name); + take_dentry_name_snapshot(&old_name, old_dentry); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); @@ -4468,14 +4468,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, inode_unlock(target); dput(new_dentry); if (!error) { - fsnotify_move(old_dir, new_dir, old_name, is_dir, + fsnotify_move(old_dir, new_dir, old_name.name, is_dir, !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } - fsnotify_oldname_free(old_name); + release_dentry_name_snapshot(&old_name); return error; } diff --git a/fs/namespace.c b/fs/namespace.c index 5a4438445bf7..f70914a859a4 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -236,6 +236,7 @@ static struct mount *alloc_vfsmnt(const char *name) INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); + INIT_LIST_HEAD(&mnt->mnt_umounting); init_fs_pin(&mnt->mnt_umount, drop_mountpoint); } return mnt; diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 69d02cf8cf37..5f93cfacb3d1 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT config PNFS_BLOCK tristate depends on NFS_V4_1 && BLK_DEV_DM + depends on 64BIT || LBDAF default NFS_V4 config PNFS_FLEXFILE_LAYOUT diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2ac00bf4ecf1..5f0d08b2e1e5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1115,11 +1115,13 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) /* Force a full look up iff the parent directory has changed */ if (!nfs_is_exclusive_create(dir, flags) && nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) { - - if (nfs_lookup_verify_inode(inode, flags)) { + error = nfs_lookup_verify_inode(inode, flags); + if (error) { if (flags & LOOKUP_RCU) return -ECHILD; - goto out_zap_parent; + if (error == -ESTALE) + goto out_zap_parent; + goto out_error; } nfs_advise_use_readdirplus(dir); goto out_valid; @@ -1144,8 +1146,10 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) trace_nfs_lookup_revalidate_enter(dir, dentry, flags); error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label); trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error); - if (error) + if (error == -ESTALE || error == -ENOENT) goto out_bad; + if (error) + goto out_error; if (nfs_compare_fh(NFS_FH(inode), fhandle)) goto out_bad; if ((error = nfs_refresh_inode(inode, fattr)) != 0) diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 5713eb32a45e..d264363559db 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -750,7 +750,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local) */ nfs_sync_mapping(filp->f_mapping); if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) - 
nfs_zap_mapping(inode, filp->f_mapping); + nfs_zap_caches(inode); out: return status; } diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 1cf85d65b748..3e486cd01caf 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -172,6 +172,7 @@ static int filelayout_async_handle_error(struct rpc_task *task, case -NFS4ERR_RETRY_UNCACHED_REP: break; /* Invalidate Layout errors */ + case -NFS4ERR_ACCESS: case -NFS4ERR_PNFS_NO_LAYOUT: case -ESTALE: /* mapped NFS4ERR_STALE */ case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ @@ -202,10 +203,10 @@ static int filelayout_async_handle_error(struct rpc_task *task, task->tk_status); nfs4_mark_deviceid_unavailable(devid); pnfs_error_mark_layout_for_return(inode, lseg); + pnfs_set_lo_fail(lseg); rpc_wake_up(&tbl->slot_tbl_waitq); /* fall through */ default: - pnfs_set_lo_fail(lseg); reset: dprintk("%s Retry through MDS. Error %d\n", __func__, task->tk_status); diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index 6df7a0cf5660..f32c58bbe556 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -32,6 +32,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) { nfs4_print_deviceid(&mirror_ds->id_node.deviceid); nfs4_pnfs_ds_put(mirror_ds->ds); + kfree(mirror_ds->ds_versions); kfree_rcu(mirror_ds, id_node.rcu); } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1de93ba78dc9..8c465d3c7e05 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1315,9 +1315,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat return 0; /* Has the inode gone and changed behind our back? */ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) - return -EIO; + return -ESTALE; if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) - return -EIO; + return -ESTALE; if (!nfs_file_has_buffered_writers(nfsi)) { /* Verify a few of the more important attributes */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dbfa18900e25..074169a54162 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6441,7 +6441,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&q->lock, flags); - freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT); + freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT); } finish_wait(q, &wait); @@ -7407,7 +7407,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data) cdata->res.server_scope = NULL; } /* Save the EXCHANGE_ID verifier session trunk tests */ - memcpy(clp->cl_confirm.data, cdata->args.verifier->data, + memcpy(clp->cl_confirm.data, cdata->args.verifier.data, sizeof(clp->cl_confirm.data)); } out: @@ -7444,7 +7444,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = { static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, u32 sp4_how, struct rpc_xprt *xprt) { - nfs4_verifier verifier; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], .rpc_cred = cred, @@ -7468,8 +7467,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, return -ENOMEM; } - if (!xprt) - nfs4_init_boot_verifier(clp, &verifier); + nfs4_init_boot_verifier(clp, &calldata->args.verifier); status = nfs4_init_uniform_client_string(clp); if (status) @@ -7510,9 +7508,8 @@ static int 
_nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, task_setup_data.rpc_xprt = xprt; task_setup_data.flags = RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC; - calldata->args.verifier = &clp->cl_confirm; - } else { - calldata->args.verifier = &verifier; + memcpy(calldata->args.verifier.data, clp->cl_confirm.data, + sizeof(calldata->args.verifier.data)); } calldata->args.client = clp; #ifdef CONFIG_NFS_V4_1_MIGRATION diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3aebfdc82b30..b0cbee2b2422 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1765,7 +1765,7 @@ static void encode_exchange_id(struct xdr_stream *xdr, int len = 0; encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr); - encode_nfs4_verifier(xdr, args->verifier); + encode_nfs4_verifier(xdr, &args->verifier); encode_string(xdr, strlen(args->client->cl_owner_id), args->client->cl_owner_id); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index ad92b401326c..7ddba5022948 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -779,6 +779,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, gfp_t gfp_flags = GFP_KERNEL; pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); + pg_array->npages = pagecount; if (pagecount <= ARRAY_SIZE(pg_array->page_array)) pg_array->pagevec = pg_array->page_array; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 26780d53a6f9..ed8d6b73d12a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -144,7 +144,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp) argp->p = page_address(argp->pagelist[0]); argp->pagelist++; if (argp->pagelen < PAGE_SIZE) { - argp->end = argp->p + (argp->pagelen>>2); + argp->end = argp->p + XDR_QUADLEN(argp->pagelen); argp->pagelen = 0; } else { argp->end = argp->p + (PAGE_SIZE>>2); @@ -1279,9 +1279,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) argp->pagelen -= pages * PAGE_SIZE; len -= pages * PAGE_SIZE; - argp->p = (__be32 *)page_address(argp->pagelist[0]); - argp->pagelist++; - argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); + next_decode_page(argp); } argp->p += XDR_QUADLEN(len); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 01a9f0f007d4..0c4583b61717 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask if (unlikely(!fsnotify_inode_watches_children(p_inode))) __fsnotify_update_child_dentry_flags(p_inode); else if (p_inode->i_fsnotify_mask & mask) { + struct name_snapshot name; + /* we are notifying a parent so come up with the new mask which * specifies these are events which came from a child. 
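The __fsnotify_parent() hunk above stops passing dentry->d_name.name straight into fsnotify(): a concurrent rename can free an external name while the event is being generated, so the name is snapshotted first and released afterwards. A toy userspace sketch of the copy-under-lock idea, with a pthread mutex standing in for d_lock; note the kernel version pins refcounted external names instead of always copying, so this shows only the copy case, and the names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t d_lock = PTHREAD_MUTEX_INITIALIZER;
static char d_name[64] = "oldname";	/* stand-in for dentry->d_name */

struct name_snapshot { char name[64]; };

static void take_name_snapshot(struct name_snapshot *s)
{
	/* Copy the name while it cannot change under us. */
	pthread_mutex_lock(&d_lock);
	strcpy(s->name, d_name);
	pthread_mutex_unlock(&d_lock);
}

int main(void)
{
	struct name_snapshot snap;

	take_name_snapshot(&snap);

	/* A concurrent "rename" may now update d_name... */
	pthread_mutex_lock(&d_lock);
	strcpy(d_name, "newname");
	pthread_mutex_unlock(&d_lock);

	/* ...while the event path still reports a stable copy. */
	printf("event for: %s\n", snap.name);
	return 0;
}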
*/ mask |= FS_EVENT_ON_CHILD; + take_dentry_name_snapshot(&name, dentry); if (path) ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH, - dentry->d_name.name, 0); + name.name, 0); else ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE, - dentry->d_name.name, 0); + name.name, 0); + release_dentry_name_snapshot(&name); } dput(parent); diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index dc22ba8c710f..e50a387959bf 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -240,18 +240,6 @@ int ocfs2_set_acl(handle_t *handle, switch (type) { case ACL_TYPE_ACCESS: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; - if (acl) { - umode_t mode; - - ret = posix_acl_update_mode(inode, &mode, &acl); - if (ret) - return ret; - - ret = ocfs2_acl_set_mode(inode, di_bh, - handle, mode); - if (ret) - return ret; - } break; case ACL_TYPE_DEFAULT: name_index = OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT; @@ -289,7 +277,19 @@ int ocfs2_iop_set_acl(struct inode *inode, struct posix_acl *acl, int type) had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh); if (had_lock < 0) return had_lock; + if (type == ACL_TYPE_ACCESS && acl) { + umode_t mode; + + status = posix_acl_update_mode(inode, &mode, &acl); + if (status) + goto unlock; + + status = ocfs2_acl_set_mode(inode, bh, NULL, mode); + if (status) + goto unlock; + } status = ocfs2_set_acl(NULL, inode, bh, type, acl, NULL, NULL); +unlock: ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); brelse(bh); return status; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index a63a71656e9b..bacb1809e941 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -478,17 +478,30 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, } static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, - struct cattr *attr, struct dentry *hardlink) + struct cattr *attr, struct dentry *hardlink, + bool origin) { int err; const struct cred *old_cred; struct cred *override_cred; + struct dentry *parent = dentry->d_parent; - err = ovl_copy_up(dentry->d_parent); + err = ovl_copy_up(parent); if (err) return err; old_cred = ovl_override_creds(dentry->d_sb); + + /* + * When linking a file with copy up origin into a new parent, mark the + * new parent dir "impure". 
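For context, the "impure" mark that ovl_set_impure() applies above tells overlayfs that the upper directory may contain entries with a copy-up origin, so their inode numbers need fixing up on readdir. The mark itself is an xattr on the upper directory; a rough userspace analogue (the xattr name follows what upstream overlayfs uses, while the path and value are illustrative):

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* "/upper/parent" is a placeholder for the upper-layer dir. */
	if (setxattr("/upper/parent", "trusted.overlay.impure", "y", 1, 0))
		perror("setxattr");
	return 0;
}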
+ */ + if (origin) { + err = ovl_set_impure(parent, ovl_dentry_upper(parent)); + if (err) + goto out_revert_creds; + } + err = -ENOMEM; override_cred = prepare_creds(); if (override_cred) { @@ -547,7 +560,7 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev, inode_init_owner(inode, dentry->d_parent->d_inode, mode); attr.mode = inode->i_mode; - err = ovl_create_or_link(dentry, inode, &attr, NULL); + err = ovl_create_or_link(dentry, inode, &attr, NULL, false); if (err) iput(inode); @@ -601,7 +614,8 @@ static int ovl_link(struct dentry *old, struct inode *newdir, inode = d_inode(old); ihold(inode); - err = ovl_create_or_link(new, inode, NULL, ovl_dentry_upper(old)); + err = ovl_create_or_link(new, inode, NULL, ovl_dentry_upper(old), + ovl_type_origin(old)); if (err) iput(inode); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 4882ffb37bae..04d0c7ed645e 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -946,6 +946,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) else sb->s_d_op = &ovl_dentry_operations; + err = -ENOMEM; ufs->creator_cred = cred = prepare_creds(); if (!cred) goto out_put_lower_mnt; diff --git a/fs/pnode.c b/fs/pnode.c index 5bc7896d122a..53d411a371ce 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p) return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); } +static inline struct mount *last_slave(struct mount *p) +{ + return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave); +} + static inline struct mount *next_slave(struct mount *p) { return list_entry(p->mnt_slave.next, struct mount, mnt_slave); @@ -162,6 +167,19 @@ static struct mount *propagation_next(struct mount *m, } } +static struct mount *skip_propagation_subtree(struct mount *m, + struct mount *origin) +{ + /* + * Advance m such that propagation_next will not return + * the slaves of m. + */ + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) + m = last_slave(m); + + return m; +} + static struct mount *next_group(struct mount *m, struct mount *origin) { while (1) { @@ -413,65 +431,104 @@ void propagate_mount_unlock(struct mount *mnt) } } -/* - * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. - */ -static void mark_umount_candidates(struct mount *mnt) +static void umount_one(struct mount *mnt, struct list_head *to_umount) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; - - BUG_ON(parent == mnt); - - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - if (!child || (child->mnt.mnt_flags & MNT_UMOUNT)) - continue; - if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) { - SET_MNT_MARK(child); - } - } + CLEAR_MNT_MARK(mnt); + mnt->mnt.mnt_flags |= MNT_UMOUNT; + list_del_init(&mnt->mnt_child); + list_del_init(&mnt->mnt_umounting); + list_move_tail(&mnt->mnt_list, to_umount); } /* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ -static void __propagate_umount(struct mount *mnt) +static bool __propagate_umount(struct mount *mnt, + struct list_head *to_umount, + struct list_head *to_restore) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; + bool progress = false; + struct mount *child; - BUG_ON(parent == mnt); + /* + * The state of the parent won't change if this mount is + * already unmounted or marked as without children. 
+ */ + if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) + goto out; - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *topper; - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - /* - * umount the child only if the child has no children - * and the child is marked safe to unmount. - */ - if (!child || !IS_MNT_MARKED(child)) + /* Verify topper is the only grandchild that has not been + * speculatively unmounted. + */ + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + if (child->mnt_mountpoint == mnt->mnt.mnt_root) continue; - CLEAR_MNT_MARK(child); + if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) + continue; + /* Found a mounted child */ + goto children; + } - /* If there is exactly one mount covering all of child - * replace child with that mount. - */ - topper = find_topper(child); - if (topper) - mnt_change_mountpoint(child->mnt_parent, child->mnt_mp, - topper); + /* Mark mounts that can be unmounted if not locked */ + SET_MNT_MARK(mnt); + progress = true; + + /* If a mount is without children and not locked umount it. */ + if (!IS_MNT_LOCKED(mnt)) { + umount_one(mnt, to_umount); + } else { +children: + list_move_tail(&mnt->mnt_umounting, to_restore); + } +out: + return progress; +} + +static void umount_list(struct list_head *to_umount, + struct list_head *to_restore) +{ + struct mount *mnt, *child, *tmp; + list_for_each_entry(mnt, to_umount, mnt_list) { + list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { + /* topper? */ + if (child->mnt_mountpoint == mnt->mnt.mnt_root) + list_move_tail(&child->mnt_umounting, to_restore); + else + umount_one(child, to_umount); + } + } +} - if (list_empty(&child->mnt_mounts)) { - list_del_init(&child->mnt_child); - child->mnt.mnt_flags |= MNT_UMOUNT; - list_move_tail(&child->mnt_list, &mnt->mnt_list); +static void restore_mounts(struct list_head *to_restore) +{ + /* Restore mounts to a clean working state */ + while (!list_empty(to_restore)) { + struct mount *mnt, *parent; + struct mountpoint *mp; + + mnt = list_first_entry(to_restore, struct mount, mnt_umounting); + CLEAR_MNT_MARK(mnt); + list_del_init(&mnt->mnt_umounting); + + /* Should this mount be reparented? */ + mp = mnt->mnt_mp; + parent = mnt->mnt_parent; + while (parent->mnt.mnt_flags & MNT_UMOUNT) { + mp = parent->mnt_mp; + parent = parent->mnt_parent; } + if (parent != mnt->mnt_parent) + mnt_change_mountpoint(parent, mp, mnt); + } +} + +static void cleanup_umount_visitations(struct list_head *visited) +{ + while (!list_empty(visited)) { + struct mount *mnt = + list_first_entry(visited, struct mount, mnt_umounting); + list_del_init(&mnt->mnt_umounting); } } @@ -485,11 +542,68 @@ static void __propagate_umount(struct mount *mnt) int propagate_umount(struct list_head *list) { struct mount *mnt; + LIST_HEAD(to_restore); + LIST_HEAD(to_umount); + LIST_HEAD(visited); + + /* Find candidates for unmounting */ + list_for_each_entry_reverse(mnt, list, mnt_list) { + struct mount *parent = mnt->mnt_parent; + struct mount *m; + + /* + * If this mount has already been visited it is known that its + * entire peer group and all of their slaves in the propagation + * tree for the mountpoint have already been visited and there is + * no need to visit them again.
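propagate_umount() above needs a cheap way to remember which mounts it has already examined, and reuses the embedded mnt_umounting list node for it: an empty node means "not visited", and linking the node onto the visited list marks it. A self-contained sketch of that trick with a minimal circular doubly-linked list (the types mirror the kernel's shape but are illustrative):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }
static int list_empty(const struct list_head *l) { return l->next == l; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct mount { struct list_head mnt_umounting; };

int main(void)
{
	struct list_head visited;
	struct mount m;

	INIT_LIST_HEAD(&visited);
	INIT_LIST_HEAD(&m.mnt_umounting);

	/* An empty embedded node doubles as "not visited yet". */
	printf("visited: %d\n", !list_empty(&m.mnt_umounting)); /* 0 */

	/* Queueing the node on the visited list marks the mount. */
	list_add_tail(&m.mnt_umounting, &visited);
	printf("visited: %d\n", !list_empty(&m.mnt_umounting)); /* 1 */
	return 0;
}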
+ */ + if (!list_empty(&mnt->mnt_umounting)) + continue; + + list_add_tail(&mnt->mnt_umounting, &visited); + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { + struct mount *child = __lookup_mnt(&m->mnt, + mnt->mnt_mountpoint); + if (!child) + continue; + + if (!list_empty(&child->mnt_umounting)) { + /* + * If the child has already been visited it is + * known that its entire peer group and all of + * their slaves in the propagation tree for the + * mountpoint have already been visited and there + * is no need to visit this subtree again. + */ + m = skip_propagation_subtree(m, parent); + continue; + } else if (child->mnt.mnt_flags & MNT_UMOUNT) { + /* + * We have come across a partially unmounted + * mount in list that has not been visited yet. + * Remember it has been visited and continue + * about our merry way. + */ + list_add_tail(&child->mnt_umounting, &visited); + continue; + } + + /* Check the child and parents while progress is made */ + while (__propagate_umount(child, + &to_umount, &to_restore)) { + /* Is the parent a umount candidate? */ + child = child->mnt_parent; + if (list_empty(&child->mnt_umounting)) + break; + } + } + } - list_for_each_entry_reverse(mnt, list, mnt_list) - mark_umount_candidates(mnt); + umount_list(&to_umount, &to_restore); + restore_mounts(&to_restore); + cleanup_umount_visitations(&visited); + list_splice_tail(&to_umount, list); - list_for_each_entry(mnt, list, mnt_list) - __propagate_umount(mnt); return 0; } diff --git a/fs/proc/internal.h b/fs/proc/internal.h index c5ae09b6c726..18694598bebf 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -67,7 +67,7 @@ struct proc_inode { struct proc_dir_entry *pde; struct ctl_table_header *sysctl; struct ctl_table *sysctl_entry; - struct list_head sysctl_inodes; + struct hlist_node sysctl_inodes; const struct proc_ns_operations *ns_ops; struct inode vfs_inode; }; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 67985a7233c2..9bf06e2b1284 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -191,7 +191,7 @@ static void init_header(struct ctl_table_header *head, head->set = set; head->parent = NULL; head->node = node; - INIT_LIST_HEAD(&head->inodes); + INIT_HLIST_HEAD(&head->inodes); if (node) { struct ctl_table *entry; for (entry = table; entry->procname; entry++, node++) @@ -261,25 +261,42 @@ static void unuse_table(struct ctl_table_header *p) complete(p->unregistering); } -/* called under sysctl_lock */ static void proc_sys_prune_dcache(struct ctl_table_header *head) { - struct inode *inode, *prev = NULL; + struct inode *inode; struct proc_inode *ei; + struct hlist_node *node; + struct super_block *sb; rcu_read_lock(); - list_for_each_entry_rcu(ei, &head->inodes, sysctl_inodes) { - inode = igrab(&ei->vfs_inode); - if (inode) { - rcu_read_unlock(); - iput(prev); - prev = inode; - d_prune_aliases(inode); + for (;;) { + node = hlist_first_rcu(&head->inodes); + if (!node) + break; + ei = hlist_entry(node, struct proc_inode, sysctl_inodes); + spin_lock(&sysctl_lock); + hlist_del_init_rcu(&ei->sysctl_inodes); + spin_unlock(&sysctl_lock); + + inode = &ei->vfs_inode; + sb = inode->i_sb; + if (!atomic_inc_not_zero(&sb->s_active)) + continue; + inode = igrab(inode); + rcu_read_unlock(); + if (unlikely(!inode)) { + deactivate_super(sb); rcu_read_lock(); + continue; } + + d_prune_aliases(inode); + iput(inode); + deactivate_super(sb); + + rcu_read_lock(); } rcu_read_unlock(); - iput(prev); } /* called under sysctl_lock, will reacquire if has to wait */ @@
-461,7 +478,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, } ei->sysctl = head; ei->sysctl_entry = table; - list_add_rcu(&ei->sysctl_inodes, &head->inodes); + hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes); head->count++; spin_unlock(&sysctl_lock); @@ -489,7 +506,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head) { spin_lock(&sysctl_lock); - list_del_rcu(&PROC_I(inode)->sysctl_inodes); + hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes); if (!--head->count) kfree_rcu(head, rcu); spin_unlock(&sysctl_lock); } diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index d468eec9b8a6..4c5cd9368460 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -770,8 +770,11 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *decompressed; + if (!record->compressed) + return; + /* Only PSTORE_TYPE_DMESG supports compression. */ - if (!record->compressed || record->type != PSTORE_TYPE_DMESG) { + if (record->type != PSTORE_TYPE_DMESG) { pr_warn("ignored compressed record type %d\n", record->type); return; } @@ -846,8 +849,10 @@ void pstore_get_backend_records(struct pstore_info *psi, record->size = psi->read(record); /* No more records left in backend? */ - if (record->size <= 0) + if (record->size <= 0) { + kfree(record); break; + } decompress_record(record); rc = pstore_mkfile(root, record); diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 3d2256a425ee..d92a1dc6ee70 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) error = journal_begin(&th, inode->i_sb, jcreate_blocks); reiserfs_write_unlock(inode->i_sb); if (error == 0) { + if (type == ACL_TYPE_ACCESS && acl) { + error = posix_acl_update_mode(inode, &inode->i_mode, + &acl); + if (error) + goto unlock; + } error = __reiserfs_set_acl(&th, inode, type, acl); +unlock: reiserfs_write_lock(inode->i_sb); error2 = journal_end(&th); reiserfs_write_unlock(inode->i_sb); @@ -241,11 +248,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, switch (type) { case ACL_TYPE_ACCESS: name = XATTR_NAME_POSIX_ACL_ACCESS; - if (acl) { - error = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (error) - return error; - } break; case ACL_TYPE_DEFAULT: name = XATTR_NAME_POSIX_ACL_DEFAULT; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 566079d9b402..c67f6a3a606c 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -143,6 +143,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, struct inode *dir, case S_IFBLK: case S_IFCHR: inode->i_op = &ubifs_file_inode_operations; + encrypted = false; break; default: BUG(); } diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 294519b98874..df1aff716fab 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -574,7 +574,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, /* Make sure to also account for extended attributes */ len += host_ui->data_len; - dent = kmalloc(len, GFP_NOFS); + dent = kzalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; @@ -967,7 +967,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, if (twoparents) len += plen; - dent1 = kmalloc(len, GFP_NOFS); + dent1 = kzalloc(len, GFP_NOFS); if (!dent1) return -ENOMEM; @@ -984,6 +984,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, dent1->nlen =
cpu_to_le16(fname_len(snd_nm)); memcpy(dent1->name, fname_name(snd_nm), fname_len(snd_nm)); dent1->name[fname_len(snd_nm)] = '\0'; + set_dent_cookie(c, dent1); zero_dent_node_unused(dent1); ubifs_prep_grp_node(c, dent1, dlen1, 0); @@ -996,6 +997,7 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, dent2->nlen = cpu_to_le16(fname_len(fst_nm)); memcpy(dent2->name, fname_name(fst_nm), fname_len(fst_nm)); dent2->name[fname_len(fst_nm)] = '\0'; + set_dent_cookie(c, dent2); zero_dent_node_unused(dent2); ubifs_prep_grp_node(c, dent2, dlen2, 0); @@ -1117,7 +1119,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8); if (move) len += plen; - dent = kmalloc(len, GFP_NOFS); + dent = kzalloc(len, GFP_NOFS); if (!dent) return -ENOMEM; @@ -1500,7 +1502,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host, hlen = host_ui->data_len + UBIFS_INO_NODE_SZ; len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8); - xent = kmalloc(len, GFP_NOFS); + xent = kzalloc(len, GFP_NOFS); if (!xent) return -ENOMEM; @@ -1607,7 +1609,7 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode, aligned_len1 = ALIGN(len1, 8); aligned_len = aligned_len1 + ALIGN(len2, 8); - ino = kmalloc(aligned_len, GFP_NOFS); + ino = kzalloc(aligned_len, GFP_NOFS); if (!ino) return -ENOMEM; diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 709aa098dd46..96374a39ffba 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2802,6 +2802,8 @@ int ubifs_tnc_remove_ino(struct ubifs_info *c, ino_t inum) dbg_tnc("xent '%s', ino %lu", xent->name, (unsigned long)xattr_inum); + ubifs_evict_xattr_inode(c, xattr_inum); + fname_name(&nm) = xent->name; fname_len(&nm) = le16_to_cpu(xent->nlen); err = ubifs_tnc_remove_nm(c, &key1, &nm); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 298b4d89eee9..f9db88c775c7 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1757,6 +1757,7 @@ int ubifs_xattr_set(struct inode *host, const char *name, const void *value, size_t size, int flags); ssize_t ubifs_xattr_get(struct inode *host, const char *name, void *buf, size_t size); +void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum); #ifdef CONFIG_UBIFS_FS_SECURITY extern int ubifs_init_security(struct inode *dentry, struct inode *inode, diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 6c9e62c2ef55..98f11257d66c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -513,6 +513,28 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, return err; } +/** + * ubifs_evict_xattr_inode - Evict an xattr inode. + * @c: UBIFS file-system description object + * @xattr_inum: xattr inode number + * + * When an inode that hosts xattrs is being removed we have to make sure + * that cached inodes of the xattrs also get removed from the inode cache + * otherwise we'd waste memory. This function looks up an inode from the + * inode cache and clears the link counter such that iput() will evict + * the inode. 
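The ubifs_jnl_*() allocations above move from kmalloc() to kzalloc() so that padding and unused tail bytes of the journal nodes never carry stale heap contents onto the flash medium. A userspace analogue of the difference, using calloc() as the zeroing allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 64;
	char *dent = calloc(1, len);	/* whole buffer starts zeroed */

	if (!dent)
		return 1;
	strcpy(dent, "name");	/* only the head is ever written */
	/* With malloc() the tail could hold stale data; here it is 0. */
	printf("tail byte: %d\n", dent[len - 1]);
	free(dent);
	return 0;
}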
+ */ +void ubifs_evict_xattr_inode(struct ubifs_info *c, ino_t xattr_inum) +{ + struct inode *inode; + + inode = ilookup(c->vfs_sb, xattr_inum); + if (inode) { + clear_nlink(inode); + iput(inode); + } +} + static int ubifs_xattr_remove(struct inode *host, const char *name) { struct inode *inode; diff --git a/fs/udf/file.c b/fs/udf/file.c index f5eb2d5b3bac..e06d2c15749a 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -43,10 +43,15 @@ static void __udf_adinicb_readpage(struct page *page) struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); + loff_t isize = i_size_read(inode); + /* + * We have to be careful here as truncate can change i_size under us. + * So just sample it once and use the same value everywhere. + */ kaddr = kmap_atomic(page); - memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); - memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size); + memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, isize); + memset(kaddr + isize, 0, PAGE_SIZE - isize); flush_dcache_page(page); SetPageUptodate(page); kunmap_atomic(kaddr); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 98c510e17203..18fdb9d90812 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1222,8 +1222,8 @@ int udf_setsize(struct inode *inode, loff_t newsize) return err; } set_size: - truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); + truncate_setsize(inode, newsize); } else { if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { down_write(&iinfo->i_data_sem); @@ -1240,9 +1240,9 @@ int udf_setsize(struct inode *inode, loff_t newsize) udf_get_block); if (err) return err; + truncate_setsize(inode, newsize); down_write(&iinfo->i_data_sem); udf_clear_extent_cache(inode); - truncate_setsize(inode, newsize); udf_truncate_extents(inode); up_write(&iinfo->i_data_sem); } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 1d622f276e3a..26f9591b04b1 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -851,6 +851,9 @@ static int userfaultfd_release(struct inode *inode, struct file *file) __wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range); spin_unlock(&ctx->fault_pending_wqh.lock); + /* Flush pending events that may still wait on event_wqh */ + wake_up_all(&ctx->event_wqh); + wake_up_poll(&ctx->fd_wqh, POLLHUP); userfaultfd_ctx_put(ctx); return 0; @@ -1645,6 +1648,8 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, uffdio_zeropage.range.len); mmput(ctx->mm); + } else { + return -ENOSPC; } if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) return -EFAULT; diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index b468e041f207..7034e17535de 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -170,8 +170,8 @@ xfs_get_acl(struct inode *inode, int type) return acl; } -STATIC int -__xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) +int +__xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { struct xfs_inode *ip = XFS_I(inode); unsigned char *ea_name; @@ -268,5 +268,5 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) } set_acl: - return __xfs_set_acl(inode, type, acl); + return __xfs_set_acl(inode, acl, type); } diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 286fa89217f5..04327318ef67 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h @@ -24,6 +24,7 @@ struct posix_acl; #ifdef CONFIG_XFS_POSIX_ACL extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); extern int xfs_set_acl(struct 
inode *inode, struct posix_acl *acl, int type); +extern int __xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type); #else static inline struct posix_acl *xfs_get_acl(struct inode *inode, int type) { diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ebfc13350f9a..077e2b2ac773 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -190,12 +190,12 @@ xfs_generic_create( #ifdef CONFIG_XFS_POSIX_ACL if (default_acl) { - error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + error = __xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); if (error) goto out_cleanup_inode; } if (acl) { - error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); + error = __xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); if (error) goto out_cleanup_inode; } diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 82f1cbcc4de1..1f53dc23aebe 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -519,6 +519,7 @@ xlog_discard_endio( INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work); queue_work(xfs_discard_wq, &ctx->discard_endio_work); + bio_put(bio); } static void diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2eaf81859166..7147d4a8d207 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1209,7 +1209,7 @@ xfs_mod_icount( struct xfs_mount *mp, int64_t delta) { - __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH); + percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH); if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) { ASSERT(0); percpu_counter_add(&mp->m_icount, -delta); @@ -1288,7 +1288,7 @@ xfs_mod_fdblocks( else batch = XFS_FDBLOCKS_BATCH; - __percpu_counter_add(&mp->m_fdblocks, delta, batch); + percpu_counter_add_batch(&mp->m_fdblocks, delta, batch); if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside, XFS_FDBLOCKS_BATCH) >= 0) { /* we had space! */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 314a0b9219c6..a06342f11259 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -59,6 +59,22 @@ /* Align . to a 8 byte boundary equals to maximum function alignment. */ #define ALIGN_FUNCTION() . = ALIGN(8) +/* + * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which + * generates .data.identifier sections, which need to be pulled in with + * .data. We don't want to pull in .data..other sections, which Linux + * has defined. Same for text and bss. + */ +#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION +#define TEXT_MAIN .text .text.[0-9a-zA-Z_]* +#define DATA_MAIN .data .data.[0-9a-zA-Z_]* +#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* +#else +#define TEXT_MAIN .text +#define DATA_MAIN .data +#define BSS_MAIN .bss +#endif + /* * Align to a 32 byte boundary equal to the * alignment gcc 4.5 uses for a struct @@ -199,12 +215,9 @@ /* * .data section - * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates - * .data.identifier which needs to be pulled in with .data, but don't want to - * pull in .data..stuff which has its own requirements. Same for bss. */ #define DATA_DATA \ - *(.data .data.[0-9a-zA-Z_]*) \ + *(DATA_MAIN) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ MEM_KEEP(init.data) \ @@ -435,16 +448,17 @@ VMLINUX_SYMBOL(__security_initcall_end) = .; \ } -/* .text section. Map to function alignment to avoid address changes +/* + * .text section. 
Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map - * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates - * .text.identifier which needs to be pulled in with .text , but some - * architectures define .text.foo which is not intended to be pulled in here. - * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have - * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */ + * + * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead + * code elimination is enabled, so these sections should be converted + * to use ".." first. + */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text .text.fixup .text.unlikely) \ + *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ *(.ref.text) \ MEM_KEEP(init.text) \ MEM_KEEP(exit.text) \ @@ -613,7 +627,7 @@ BSS_FIRST_SECTIONS \ *(.bss..page_aligned) \ *(.dynbss) \ - *(.bss .bss.[0-9a-zA-Z_]*) \ + *(BSS_MAIN) \ *(COMMON) \ } diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 557d84063934..e9c967b86054 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -66,37 +66,17 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi) static inline void __add_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item, s64 amount) { - __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH); -} - -static inline void __inc_wb_stat(struct bdi_writeback *wb, - enum wb_stat_item item) -{ - __add_wb_stat(wb, item, 1); + percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH); } static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { - unsigned long flags; - - local_irq_save(flags); - __inc_wb_stat(wb, item); - local_irq_restore(flags); -} - -static inline void __dec_wb_stat(struct bdi_writeback *wb, - enum wb_stat_item item) -{ - __add_wb_stat(wb, item, -1); + __add_wb_stat(wb, item, 1); } static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) { - unsigned long flags; - - local_irq_save(flags); - __dec_wb_stat(wb, item); - local_irq_restore(flags); + __add_wb_stat(wb, item, -1); } static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item) diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 01b62e7bac74..7104bea8dab1 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -518,7 +518,7 @@ static inline void blkg_stat_exit(struct blkg_stat *stat) */ static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val) { - __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); + percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH); } /** @@ -597,14 +597,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ]; - __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); if (op_is_sync(op)) cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; else cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; - __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); + percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH); } /** diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d5093b52b485..88f4289e7eee 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -43,6 +43,7 @@ struct bpf_reg_state { u32 min_align; u32 aux_off; u32 aux_off_align; + bool value_from_signed; }; enum bpf_stack_slot_type { diff 
--git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index d614c5ea1b5e..de179993e039 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -15,11 +15,3 @@ * with any version that can compile the kernel */ #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) - -/* - * GCC does not warn about unused static inline functions for - * -Wunused-function. This turns out to avoid the need for complex #ifdef - * directives. Suppress the warning in clang as well. - */ -#undef inline -#define inline inline __attribute__((unused)) notrace diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 0efef9cf014f..71fe0994cf1a 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -66,18 +66,22 @@ /* * Force always-inline if the user requests it so via the .config, - * or if gcc is too old: + * or if gcc is too old. + * GCC does not warn about unused static inline functions for + * -Wunused-function. This turns out to avoid the need for complex #ifdef + * directives. Suppress the warning in clang as well by using "unused" + * function attribute, which is redundant but not harmful for gcc. */ #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline)) notrace -#define __inline__ __inline__ __attribute__((always_inline)) notrace -#define __inline __inline __attribute__((always_inline)) notrace +#define inline inline __attribute__((always_inline,unused)) notrace +#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace +#define __inline __inline __attribute__((always_inline,unused)) notrace #else /* A lot of inline functions can cause havoc with function tracing */ -#define inline inline notrace -#define __inline__ __inline__ notrace -#define __inline __inline notrace +#define inline inline __attribute__((unused)) notrace +#define __inline__ __inline__ __attribute__((unused)) notrace +#define __inline __inline __attribute__((unused)) notrace #endif #define __always_inline inline __attribute__((always_inline)) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 0f2a80377520..30b86efea2bc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -58,7 +58,6 @@ enum cpuhp_state { CPUHP_XEN_EVTCHN_PREPARE, CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, - CPUHP_BLK_MQ_PREPARE, CPUHP_NET_FLOW_PREPARE, CPUHP_TOPOLOGY_PREPARE, CPUHP_NET_IUCV_PREPARE, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 2404ad238c0b..a21b1fb9a968 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) +extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); + +/** + * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location + * @cpu: the (optionally unsigned) integer iterator + * @mask: the cpumask pointer + * @start: the start location + * + * The implementation does not assume any bit in @mask is set (including @start). + * + * After the loop, cpu is >= nr_cpu_ids.
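+ *
+ * Illustrative use (editorial sketch, not part of this change;
+ * handle_cpu() is a hypothetical helper):
+ *
+ *	int cpu;
+ *
+ *	for_each_cpu_wrap(cpu, cpu_online_mask, 2)
+ *		handle_cpu(cpu);
+ *
+ * With CPUs 0-3 online this visits 2, 3, 0, 1, after which the
+ * iterator is past nr_cpu_ids as described above.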
+ */ +#define for_each_cpu_wrap(cpu, mask, start) \ + for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false); \ + (cpu) < nr_cpumask_bits; \ + (cpu) = cpumask_next_wrap((cpu), (mask), (start), true)) + /** * for_each_cpu_and - iterate over every cpu in both masks * @cpu: the (optionally unsigned) integer iterator diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 119a3f9604b0..898cfe2eeb42 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -18,6 +18,19 @@ #ifdef CONFIG_CPUSETS +/* + * Static branch rewrites can happen in an arbitrary order for a given + * key. In code paths where we need to loop with read_mems_allowed_begin() and + * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need + * to ensure that begin() always gets rewritten before retry() in the + * disabled -> enabled transition. If not, then if local irqs are disabled + * around the loop, we can deadlock since retry() would always be + * comparing the latest value of the mems_allowed seqcount against 0 as + * begin() still would see cpusets_enabled() as false. The enabled -> disabled + * transition should happen in reverse order for the same reasons (want to stop + * looking at real value of mems_allowed.sequence in retry() first). + */ +extern struct static_key_false cpusets_pre_enable_key; extern struct static_key_false cpusets_enabled_key; static inline bool cpusets_enabled(void) { @@ -32,12 +45,14 @@ static inline int nr_cpusets(void) static inline void cpuset_inc(void) { + static_branch_inc(&cpusets_pre_enable_key); static_branch_inc(&cpusets_enabled_key); } static inline void cpuset_dec(void) { static_branch_dec(&cpusets_enabled_key); + static_branch_dec(&cpusets_pre_enable_key); } extern int cpuset_init(void); @@ -115,7 +130,7 @@ extern void cpuset_print_current_mems_allowed(void); */ static inline unsigned int read_mems_allowed_begin(void) { - if (!cpusets_enabled()) + if (!static_branch_unlikely(&cpusets_pre_enable_key)) return 0; return read_seqcount_begin(&current->mems_allowed_seq); @@ -129,7 +144,7 @@ static inline unsigned int read_mems_allowed_begin(void) */ static inline bool read_mems_allowed_retry(unsigned int seq) { - if (!cpusets_enabled()) + if (!static_branch_unlikely(&cpusets_enabled_key)) return false; return read_seqcount_retry(&current->mems_allowed_seq, seq); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index d2e38dc6172c..025727bf6797 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry) return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0)); } +struct name_snapshot { + const char *name; + char inline_name[DNAME_INLINE_LEN]; +}; +void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); +void release_dentry_name_snapshot(struct name_snapshot *); #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 9ef518af5515..f240baac2001 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -378,6 +378,7 @@ int subsys_virtual_register(struct bus_type *subsys, * @suspend: Used to put the device to sleep mode, usually to a low power * state. * @resume: Used to bring the device from the sleep mode. + * @shutdown: Called at shut-down time to quiesce the device. * @ns_type: Callbacks so sysfs can determine namespaces. * @namespace: Namespace of the device belongs to this class. * @pm: The default device power management operations of this class.
@@ -407,6 +408,7 @@ struct class { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + int (*shutdown)(struct device *dev); const struct kobj_ns_type_operations *ns_type; const void *(*namespace)(struct device *dev); diff --git a/include/linux/fs.h b/include/linux/fs.h index 803e5a9b2654..d6d525039496 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -891,9 +891,9 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. */ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT) #elif BITS_PER_LONG==64 -#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) +#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX) #endif #define FL_POSIX 1 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index b43d3f5bd9ea..b78aa7ac77ce 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) } } -#if defined(CONFIG_FSNOTIFY) /* notify helpers */ - -/* - * fsnotify_oldname_init - save off the old filename before we change it - */ -static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) -{ - return kstrdup(name, GFP_KERNEL); -} - -/* - * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init - */ -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ - kfree(old_name); -} - -#else /* CONFIG_FSNOTIFY */ - -static inline const char *fsnotify_oldname_init(const unsigned char *name) -{ - return NULL; -} - -static inline void fsnotify_oldname_free(const unsigned char *old_name) -{ -} - -#endif /* CONFIG_FSNOTIFY */ - #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 497f2b3a5a62..97f1b465d04f 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -105,6 +105,11 @@ struct st_sensor_fullscale { struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX]; }; +struct st_sensor_sim { + u8 addr; + u8 value; +}; + /** * struct st_sensor_bdu - ST sensor device block data update * @addr: address of the register. @@ -197,6 +202,7 @@ struct st_sensor_transfer_function { * @bdu: Block data update register. * @das: Data Alignment Selection register. * @drdy_irq: Data ready register of the sensor. + * @sim: SPI serial interface mode register of the sensor. * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. * @bootime: samples to discard when sensor passing from power-down to power-up. 
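+ *
+ * Editorial sketch only (the register address and bit below are
+ * hypothetical, not taken from this header): a sensor whose SPI
+ * serial-interface-mode bit sits in bit 0 of register 0x23 would
+ * describe it in its settings table as
+ *
+ *	.sim = {
+ *		.addr = 0x23,
+ *		.value = BIT(0),
+ *	},
+ *
+ * so the common st_sensors core can switch the part to 3-wire mode
+ * when platform data sets spi_3wire.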
*/ @@ -213,6 +219,7 @@ struct st_sensor_settings { struct st_sensor_bdu bdu; struct st_sensor_das das; struct st_sensor_data_ready_irq drdy_irq; + struct st_sensor_sim sim; bool multi_read_bit; unsigned int bootime; }; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index e049526bc188..a2f6707e9fc0 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -170,9 +170,9 @@ extern struct cred init_cred; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ - .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ - .vtime_snap = 0, \ - .vtime_snap_whence = VTIME_SYS, + .vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \ + .vtime.starttime = 0, \ + .vtime.state = VTIME_SYS, #else # define INIT_VTIME(tsk) #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a6fba4804672..0991f973f8ca 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -291,7 +291,7 @@ extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); -int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd); +int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -331,7 +331,7 @@ irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) } static inline int -irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) +irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) { return maxvec; } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2cb54adc4a33..176f7569d874 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -240,7 +240,7 @@ struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; - struct device dev; + struct device *dev; }; int iommu_device_register(struct iommu_device *iommu); @@ -265,6 +265,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, iommu->fwnode = fwnode; } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return (struct iommu_device *)dev_get_drvdata(dev); +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -589,6 +594,11 @@ static inline void iommu_device_set_fwnode(struct iommu_device *iommu, { } +static inline struct iommu_device *dev_to_iommu_device(struct device *dev) +{ + return NULL; +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index cb0ba9f2a9a2..fa7fd03cb5f9 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -44,6 +44,7 @@ struct list_lru_node { /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ struct list_lru_memcg *memcg_lrus; #endif + long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 8098695e5d8d..2526c501622f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -65,6 +65,7 @@ extern bool movable_node_enabled; #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #define __init_memblock __meminit #define __initdata_memblock __meminitdata +void memblock_discard(void); #else #define __init_memblock #define __initdata_memblock @@ -78,8 +79,6 @@ phys_addr_t 
memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, int nid, ulong flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); -phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); -phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); @@ -114,6 +113,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, phys_addr_t *out_end); +void __memblock_free_early(phys_addr_t base, phys_addr_t size); +void __memblock_free_late(phys_addr_t base, phys_addr_t size); + /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 93273d9ea4d1..ba260330ce5e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -925,6 +925,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); +void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf, int node); int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index edafedb7b509..e21a0b3d6454 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -7718,8 +7718,10 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x7f]; + u8 reserved_at_0[0x7d]; + u8 mtpps_enh_out_per_adj[0x1]; + u8 mtpps_fs[0x1]; u8 pcie_performance_group[0x1]; }; @@ -8115,7 +8117,8 @@ struct mlx5_ifc_mtpps_reg_bits { u8 reserved_at_78[0x4]; u8 cap_pin_4_mode[0x4]; - u8 reserved_at_80[0x80]; + u8 field_select[0x20]; + u8 reserved_at_a0[0x60]; u8 enable[0x1]; u8 reserved_at_101[0xb]; @@ -8130,8 +8133,9 @@ struct mlx5_ifc_mtpps_reg_bits { u8 out_pulse_duration[0x10]; u8 out_periodic_adjustment[0x10]; + u8 enhanced_out_periodic_adjustment[0x20]; - u8 reserved_at_1a0[0x60]; + u8 reserved_at_1c0[0x20]; }; struct mlx5_ifc_mtppse_reg_bits { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 45cdb27791a3..ab8f7e11c160 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -494,6 +494,10 @@ struct mm_struct { * PROT_NONE or PROT_NUMA mapped page. 
*/ bool tlb_flush_pending; +#endif +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH + /* See flush_tlb_batched_pending() */ + bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; #ifdef CONFIG_HUGETLB_PAGE diff --git a/include/linux/mman.h b/include/linux/mman.h index 634c4c51fe3a..c8367041fafd 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -22,7 +22,7 @@ unsigned long vm_memory_committed(void); static inline void vm_acct_memory(long pages) { - __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch); + percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); } static inline void vm_unacct_memory(long pages) diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 8f67b1581683..a3ebb64b1cf4 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -638,10 +638,10 @@ struct nand_buffers { * @tWW_min: WP# transition to WE# low */ struct nand_sdr_timings { - u32 tBERS_max; + u64 tBERS_max; u32 tCCS_min; - u32 tPROG_max; - u32 tR_max; + u64 tPROG_max; + u64 tR_max; u32 tALH_min; u32 tADL_min; u32 tALS_min; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index b28c83475ee8..7882a07d973e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1222,7 +1222,7 @@ struct nfs41_state_protection { struct nfs41_exchange_id_args { struct nfs_client *client; - nfs4_verifier *verifier; + nfs4_verifier verifier; u32 flags; struct nfs41_state_protection state_protect; }; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index aa3cd0878270..a8d4fc3356d2 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -155,6 +155,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace; #define sysctl_softlockup_all_cpu_backtrace 0 #define sysctl_hardlockup_all_cpu_backtrace 0 #endif + +#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ + defined(CONFIG_HARDLOCKUP_DETECTOR) +void watchdog_update_hrtimer_threshold(u64 period); +#else +static inline void watchdog_update_hrtimer_threshold(u64 period) { } +#endif + extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , diff --git a/include/linux/pci.h b/include/linux/pci.h index 8039f9f0ca05..16be18678ca1 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1049,6 +1049,7 @@ void pcie_flr(struct pci_dev *dev); int __pci_reset_function(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); +int pci_reset_function_locked(struct pci_dev *dev); int pci_try_reset_function(struct pci_dev *dev); int pci_probe_reset_slot(struct pci_slot *slot); int pci_reset_slot(struct pci_slot *slot); diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 84a109449610..ec065387f443 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -39,7 +39,8 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp, void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); -void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); +void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, + s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); @@ -50,7 +51,7 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) static inline void percpu_counter_add(struct percpu_counter *fbc, 
s64 amount) { - __percpu_counter_add(fbc, amount, percpu_counter_batch); + percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) @@ -136,7 +137,7 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) } static inline void -__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) +percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { percpu_counter_add(fbc, amount); } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 24a635887f28..fc32347473a9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -310,8 +310,8 @@ struct pmu { * Notification that the event was mapped or unmapped. Called * in the context of the mapping task. */ - void (*event_mapped) (struct perf_event *event); /*optional*/ - void (*event_unmapped) (struct perf_event *event); /*optional*/ + void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ + void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ /* * Flags for ->add()/->del()/ ->start()/->stop(). There are diff --git a/include/linux/pid.h b/include/linux/pid.h index 4d179316e431..719582744a2e 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -8,7 +8,9 @@ enum pid_type PIDTYPE_PID, PIDTYPE_PGID, PIDTYPE_SID, - PIDTYPE_MAX + PIDTYPE_MAX, + /* only valid to __task_pid_nr_ns() */ + __PIDTYPE_TGID }; /* diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h index 79b0e4cdb814..f8274b0c6888 100644 --- a/include/linux/platform_data/st_sensors_pdata.h +++ b/include/linux/platform_data/st_sensors_pdata.h @@ -17,10 +17,12 @@ * Available only for accelerometer and pressure sensors. * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet). * @open_drain: set the interrupt line to be open drain if possible. + * @spi_3wire: enable spi-3wire mode. */ struct st_sensors_platform_data { u8 drdy_int_pin; bool open_drain; + bool spi_3wire; }; #endif /* ST_SENSORS_PDATA_H */ diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6b2e0dd88569..feff771e8ea0 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -371,9 +371,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r) __PTR_RING_PEEK_CALL_v; \ }) -static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp) +static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp); + return kcalloc(size, sizeof(void *), gfp); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) @@ -462,7 +462,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. 
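+ *
+ * (Editorial aside, not part of the original comment: a consumer that
+ * runs in BH context can satisfy this by using the locked _bh helper,
+ * e.g.
+ *
+ *	ptr = ptr_ring_consume_bh(&ring);
+ *
+ * which takes the consumer lock with BH disabled and therefore
+ * serializes cleanly against a concurrent resize.)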
*/ -static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, +static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, + unsigned int nrings, int size, gfp_t gfp, void (*destroy)(void *)) { @@ -470,7 +471,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings, void ***queues; int i; - queues = kmalloc(nrings * sizeof *queues, gfp); + queues = kmalloc_array(nrings, sizeof(*queues), gfp); if (!queues) goto noqueues; diff --git a/include/linux/sched.h b/include/linux/sched.h index 2b69fc650201..35f4517eeba9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -223,6 +223,24 @@ struct task_cputime { #define prof_exp stime #define sched_exp sum_exec_runtime +enum vtime_state { + /* Task is sleeping or running in a CPU with VTIME inactive: */ + VTIME_INACTIVE = 0, + /* Task runs in userspace in a CPU with VTIME active: */ + VTIME_USER, + /* Task runs in kernelspace in a CPU with VTIME active: */ + VTIME_SYS, +}; + +struct vtime { + seqcount_t seqcount; + unsigned long long starttime; + enum vtime_state state; + u64 utime; + u64 stime; + u64 gtime; +}; + struct sched_info { #ifdef CONFIG_SCHED_INFO /* Cumulative counters: */ @@ -670,16 +688,7 @@ struct task_struct { u64 gtime; struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_t vtime_seqcount; - unsigned long long vtime_snap; - enum { - /* Task is sleeping or running in a CPU with VTIME inactive: */ - VTIME_INACTIVE = 0, - /* Task runs in userspace in a CPU with VTIME active: */ - VTIME_USER, - /* Task runs in kernelspace in a CPU with VTIME active: */ - VTIME_SYS, - } vtime_snap_whence; + struct vtime vtime; #endif #ifdef CONFIG_NO_HZ_FULL @@ -1123,13 +1132,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk) return tsk->tgid; } -extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); - -static inline pid_t task_tgid_vnr(struct task_struct *tsk) -{ - return pid_vnr(task_tgid(tsk)); -} - /** * pid_alive - check that a task structure is not stale * @p: Task structure to be checked. 
@@ -1145,23 +1147,6 @@ static inline int pid_alive(const struct task_struct *p) return p->pids[PIDTYPE_PID].pid != NULL; } -static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) -{ - pid_t pid = 0; - - rcu_read_lock(); - if (pid_alive(tsk)) - pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); - rcu_read_unlock(); - - return pid; -} - -static inline pid_t task_ppid_nr(const struct task_struct *tsk) -{ - return task_ppid_nr_ns(tsk, &init_pid_ns); -} - static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) { return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); @@ -1183,6 +1168,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk) return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); } +static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns); +} + +static inline pid_t task_tgid_vnr(struct task_struct *tsk) +{ + return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); +} + +static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) +{ + pid_t pid = 0; + + rcu_read_lock(); + if (pid_alive(tsk)) + pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); + rcu_read_unlock(); + + return pid; +} + +static inline pid_t task_ppid_nr(const struct task_struct *tsk) +{ + return task_ppid_nr_ns(tsk, &init_pid_ns); +} + /* Obsolete, do not use: */ static inline pid_t task_pgrp_nr(struct task_struct *tsk) { diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index f4dfade428f0..be8b902b5845 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) } static inline int skb_array_resize_multiple(struct skb_array **rings, - int nrings, int size, gfp_t gfp) + int nrings, unsigned int size, + gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); return ptr_ring_resize_multiple((struct ptr_ring **)rings, diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 80d07816def0..1c04a26bfd2f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -143,7 +143,7 @@ struct ctl_table_header struct ctl_table_set *set; struct ctl_dir *parent; struct ctl_node *node; - struct list_head inodes; /* head for proc_inode->sysctl_inodes */ + struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ }; struct ctl_dir { diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 50398b69ca44..a1f03ebfde47 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -565,9 +565,9 @@ extern void usb_ep0_reinit(struct usb_device *); ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ - ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ - ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ #define HUB_CLASS_REQ(dir, type, request) ((((dir) | (type)) << 8) | (request)) diff --git a/include/linux/vfio.h b/include/linux/vfio.h index edf9b2cad277..9b34d0af5d27 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -97,6 +97,8 @@ extern void vfio_unregister_iommu_driver( */ extern struct vfio_group *vfio_group_get_external_user(struct file *filep); extern void vfio_group_put_external_user(struct vfio_group *group); 
+extern bool vfio_external_group_match_file(struct vfio_group *group, + struct file *filep); extern int vfio_external_user_iommu_id(struct vfio_group *group); extern long vfio_external_check_extension(struct vfio_group *group, unsigned long arg); diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 0681fe25abeb..18b405e3cd93 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -67,19 +67,12 @@ static inline void vtime_account_system(struct task_struct *tsk) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN extern void arch_vtime_task_switch(struct task_struct *tsk); -extern void vtime_account_user(struct task_struct *tsk); extern void vtime_user_enter(struct task_struct *tsk); - -static inline void vtime_user_exit(struct task_struct *tsk) -{ - vtime_account_user(tsk); -} - +extern void vtime_user_exit(struct task_struct *tsk); extern void vtime_guest_enter(struct task_struct *tsk); extern void vtime_guest_exit(struct task_struct *tsk); extern void vtime_init_idle(struct task_struct *tsk, int cpu); #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */ -static inline void vtime_account_user(struct task_struct *tsk) { } static inline void vtime_user_enter(struct task_struct *tsk) { } static inline void vtime_user_exit(struct task_struct *tsk) { } static inline void vtime_guest_enter(struct task_struct *tsk) { } diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index c102ef65cb64..db6dc9dc0482 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -323,6 +323,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ @@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue(fmt, flags, args...) 
\ - alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) + alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \ + __WQ_ORDERED_EXPLICIT | (flags), 1, ##args) #define create_workqueue(name) \ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) diff --git a/include/net/bonding.h b/include/net/bonding.h index b00508d22e0a..b2e68657a216 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -277,6 +277,11 @@ static inline bool bond_is_lb(const struct bonding *bond) BOND_MODE(bond) == BOND_MODE_ALB; } +static inline bool bond_needs_speed_duplex(const struct bonding *bond) +{ + return BOND_MODE(bond) == BOND_MODE_8023AD || bond_is_lb(bond); +} + static inline bool bond_is_nondyn_tlb(const struct bonding *bond) { return (BOND_MODE(bond) == BOND_MODE_TLB) && diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 5894730ec82a..5932e6de8fc0 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -154,12 +154,12 @@ static inline int frag_mem_limit(struct netns_frags *nf) static inline void sub_frag_mem_limit(struct netns_frags *nf, int i) { - __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch); + percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch); } static inline void add_frag_mem_limit(struct netns_frags *nf, int i) { - __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch); + percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch); } static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf) diff --git a/include/net/ip.h b/include/net/ip.h index 821cedcc8e73..0cf7f5a65fe6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -352,7 +352,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, !forwarding) return dst_mtu(dst); - return min(dst->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU); } static inline unsigned int ip_skb_dst_mtu(struct sock *sk, @@ -364,7 +364,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding); } - return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU); + return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU); } u32 ip_idents_reserve(u32 hash, int segs); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f5e625f53367..4341731f39a5 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -22,6 +22,7 @@ struct route_info { #include <net/flow.h> #include <net/ip6_fib.h> #include <net/sock.h> +#include <net/lwtunnel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/route.h> @@ -233,4 +234,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, return daddr; } +static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b) +{ + return a->dst.dev == b->dst.dev && + a->rt6i_idev == b->rt6i_idev && + ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) && + !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate); +} #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 22e52093bfda..db5b6b6346b3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -785,8 +785,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new, old = *pold; *pold = new; if (old != NULL) { - qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog); + unsigned int qlen = old->q.qlen; + unsigned int backlog = old->qstats.backlog; + qdisc_reset(old); + qdisc_tree_reduce_backlog(old, qlen, backlog); } sch_tree_unlock(sch); diff 
--git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 069582ee5d7f..06db0c3ec384 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -469,6 +469,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) #define _sctp_walk_params(pos, chunk, end, member)\ for (pos.v = chunk->member;\ + (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ + (void *)chunk + end) &&\ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ pos.v += SCTP_PAD4(ntohs(pos.p->length))) @@ -479,6 +481,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ sizeof(sctp_chunkhdr_t));\ + ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ + (void *)chunk_hdr + end) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ ntohs(err->length) >= sizeof(sctp_errhdr_t); \ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length)))) diff --git a/include/net/udp.h b/include/net/udp.h index 3391dbd73959..1933442cf1a6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -265,6 +265,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, } void udp_v4_early_demux(struct sk_buff *skb); +void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); int udp_get_port(struct sock *sk, unsigned short snum, int (*saddr_cmp)(const struct sock *, const struct sock *)); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 49a59202f85e..da7d6b89df77 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -221,9 +221,17 @@ struct vxlan_config { bool no_share; }; +struct vxlan_dev_node { + struct hlist_node hlist; + struct vxlan_dev *vxlan; +}; + /* Pseudo network device */ struct vxlan_dev { - struct hlist_node hlist; /* vni hash table */ + struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */ +#if IS_ENABLED(CONFIG_IPV6) + struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */ +#endif struct list_head next; /* vxlan's per namespace list */ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */ #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index b379f93a2c48..16351de31243 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -56,6 +56,7 @@ struct scsi_pointer { /* for scmd->flags */ #define SCMD_TAGGED (1 << 0) +#define SCMD_UNCHECKED_ISA_DMA (1 << 1) struct scsi_cmnd { struct scsi_request req; diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 05641aebd181..148e6511ee0c 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -248,6 +248,7 @@ enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING, STARGET_REMOVE, + STARGET_CREATED_REMOVE, STARGET_DEL, }; diff --git a/include/sound/soc.h b/include/sound/soc.h index 5170fd81e1fd..375893d8d4a5 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -795,10 +795,6 @@ struct snd_soc_component_driver { int (*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); - /* pcm creation and destruction */ - int (*pcm_new)(struct snd_soc_pcm_runtime *); - void (*pcm_free)(struct snd_pcm *); - /* DT */ int (*of_xlate_dai_name)(struct snd_soc_component *component, struct of_phandle_args *args, @@ -872,8 +868,6 @@ struct snd_soc_component { void (*remove)(struct snd_soc_component *); int 
(*suspend)(struct snd_soc_component *); int (*resume)(struct snd_soc_component *); - int (*pcm_new)(struct snd_soc_pcm_runtime *); - void (*pcm_free)(struct snd_pcm *); /* machine specific init */ int (*init)(struct snd_soc_component *component); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 5f17fb770477..ed766dcb9cb7 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -66,6 +66,14 @@ struct sock; #define TA_DEFAULT_FABRIC_PROT_TYPE 0 /* TPG status needs to be enabled to return sendtargets discovery endpoint info */ #define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1 +/* + * Used to control the sending of keys with the optional-to-respond state bit, + * as a workaround for non-RFC-compliant initiators that neither propose + * nor respond to specific keys required for login to complete. + * + * See iscsi_check_proposer_for_optional_reply() for more details. + */ +#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1 #define ISCSI_IOV_DATA_BUFFER 5 @@ -769,6 +777,7 @@ struct iscsi_tpg_attrib { u8 t10_pi; u32 fabric_prot_type; u32 tpg_enabled_sendtargets; + u32 login_keys_workaround; struct iscsi_portal_group *tpg; }; @@ -778,6 +787,7 @@ struct iscsi_np { int np_sock_type; enum np_thread_state_table np_thread_state; bool enabled; + atomic_t np_reset_count; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h index 307acbc82d80..34b81aa1a2f7 100644 --- a/include/uapi/linux/sched/types.h +++ b/include/uapi/linux/sched/types.h @@ -54,21 +54,21 @@ struct sched_param { * available in the scheduling class file or in Documentation/. */ struct sched_attr { - u32 size; + __u32 size; - u32 sched_policy; - u64 sched_flags; + __u32 sched_policy; + __u64 sched_flags; /* SCHED_NORMAL, SCHED_BATCH */ - s32 sched_nice; + __s32 sched_nice; /* SCHED_FIFO, SCHED_RR */ - u32 sched_priority; + __u32 sched_priority; /* SCHED_DEADLINE */ - u64 sched_runtime; - u64 sched_deadline; - u64 sched_period; + __u64 sched_runtime; + __u64 sched_deadline; + __u64 sched_period; }; #endif /* _UAPI_LINUX_SCHED_TYPES_H */ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e8d41ff57241..a6ced9e07e1c 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -1253,8 +1253,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); - if (ret == 1) + if (ret == 1) { + sock = NULL; goto retry; + } if (ret) { sock = NULL; nc = NULL; diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c index 62d686d96581..ed748ee40029 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -457,13 +457,15 @@ void audit_remove_watch_rule(struct audit_krule *krule) list_del(&krule->rlist); if (list_empty(&watch->rules)) { + /* + * audit_remove_watch() drops our reference to 'parent' which + * can get freed. Grab our own reference to be safe.
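+ *
+ * (Editorial sketch of the pattern, for clarity: without the
+ * temporary reference, the list_empty(&parent->watches) check
+ * below could dereference a freed 'parent':
+ *
+ *	audit_get_parent(parent);	pin parent
+ *	audit_remove_watch(watch);	may drop parent's last ref
+ *	...				parent still safe to inspect
+ *	audit_put_parent(parent);	drop our pin
+ * )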
+ */ + audit_get_parent(parent); audit_remove_watch(watch); - - if (list_empty(&parent->watches)) { - audit_get_parent(parent); + if (list_empty(&parent->watches)) fsnotify_destroy_mark(&parent->mark, audit_watch_group); - audit_put_parent(parent); - } + audit_put_parent(parent); } } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a8a725697bed..1e64ee3dd650 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) { regs[regno].min_value = BPF_REGISTER_MIN_RANGE; regs[regno].max_value = BPF_REGISTER_MAX_RANGE; + regs[regno].value_from_signed = false; regs[regno].min_align = 0; } @@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size, return -EACCES; } -static bool is_pointer_value(struct bpf_verifier_env *env, int regno) +static bool __is_pointer_value(bool allow_ptr_leaks, + const struct bpf_reg_state *reg) { - if (env->allow_ptr_leaks) + if (allow_ptr_leaks) return false; - switch (env->cur_state.regs[regno].type) { + switch (reg->type) { case UNKNOWN_VALUE: case CONST_IMM: return false; @@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) } } +static bool is_pointer_value(struct bpf_verifier_env *env, int regno) +{ + return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]); +} + static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, int off, int size, bool strict) { @@ -1650,6 +1657,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) return 0; } +static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env, + struct bpf_insn *insn) +{ + struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *dst_reg = &regs[insn->dst_reg]; + struct bpf_reg_state *src_reg = &regs[insn->src_reg]; + u8 opcode = BPF_OP(insn->code); + s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm); + + /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */ + if (src_reg->imm > 0 && dst_reg->imm) { + switch (opcode) { + case BPF_ADD: + /* dreg += sreg + * where both have zero upper bits. Adding them + * can only result in making one more bit non-zero + * in the larger value. + * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) + * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) + */ + dst_reg->imm = min(src_reg->imm, 63 - imm_log2); + dst_reg->imm--; + break; + case BPF_AND: + /* dreg &= sreg + * AND cannot extend zero bits, only shrink them + * Ex. 0x00..00ffffff + * & 0x0f..ffffffff + * ---------------- + * 0x00..00ffffff + */ + dst_reg->imm = max(src_reg->imm, 63 - imm_log2); + break; + case BPF_OR: + /* dreg |= sreg + * OR can only extend zero bits + * Ex.
0x00..00ffffff + * | 0x0f..ffffffff + * ---------------- + * 0x0f..00ffffff + */ + dst_reg->imm = min(src_reg->imm, 63 - imm_log2); + break; + case BPF_SUB: + case BPF_MUL: + case BPF_RSH: + case BPF_LSH: + /* These may be flushed out later */ + default: + mark_reg_unknown_value(regs, insn->dst_reg); + } + } else { + mark_reg_unknown_value(regs, insn->dst_reg); + } + + dst_reg->type = UNKNOWN_VALUE; + return 0; +} + static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) { @@ -1659,6 +1725,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, u8 opcode = BPF_OP(insn->code); u64 dst_imm = dst_reg->imm; + if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE) + return evaluate_reg_imm_alu_unknown(env, insn); + /* dst_reg->type == CONST_IMM here. Simulate execution of insns * containing ALU ops. Don't care about overflow or negative * values, just add/sub/... them; registers are in u64. @@ -1763,10 +1832,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, dst_align = dst_reg->min_align; /* We don't know anything about what was done to this register, mark it - * as unknown. + * as unknown. Also, if both derived bounds came from signed/unsigned + * mixed compares and one side is unbounded, we cannot really do anything + * with them as boundaries cannot be trusted. Thus, arithmetic of two + * regs of such kind will get invalidated bounds on the dst side. */ - if (min_val == BPF_REGISTER_MIN_RANGE && - max_val == BPF_REGISTER_MAX_RANGE) { + if ((min_val == BPF_REGISTER_MIN_RANGE && + max_val == BPF_REGISTER_MAX_RANGE) || + (BPF_SRC(insn->code) == BPF_X && + ((min_val != BPF_REGISTER_MIN_RANGE && + max_val == BPF_REGISTER_MAX_RANGE) || + (min_val == BPF_REGISTER_MIN_RANGE && + max_val != BPF_REGISTER_MAX_RANGE) || + (dst_reg->min_value != BPF_REGISTER_MIN_RANGE && + dst_reg->max_value == BPF_REGISTER_MAX_RANGE) || + (dst_reg->min_value == BPF_REGISTER_MIN_RANGE && + dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) && + regs[insn->dst_reg].value_from_signed != + regs[insn->src_reg].value_from_signed)) { reset_reg_range_values(regs, insn->dst_reg); return; } @@ -1775,10 +1858,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, * do our normal operations to the register, we need to set the values * to the min/max since they are undefined. */ - if (min_val == BPF_REGISTER_MIN_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (max_val == BPF_REGISTER_MAX_RANGE) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + if (opcode != BPF_SUB) { + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (max_val == BPF_REGISTER_MAX_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + } switch (opcode) { case BPF_ADD: @@ -1789,10 +1874,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, dst_reg->min_align = min(src_align, dst_align); break; case BPF_SUB: + /* If one of our values was at the end of our ranges, then the + * _opposite_ value in the dst_reg goes to the end of our range. 
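+ *
+ * Worked example (editorial illustration): with dst in [2, 10] and
+ * src in [3, 5], dst - src is smallest at dst_min - src_max =
+ * 2 - 5 = -3 and largest at dst_max - src_min = 10 - 3 = 7. That is
+ * why min_value subtracts max_val and max_value subtracts min_val
+ * below.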
+ */ + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + if (max_val == BPF_REGISTER_MAX_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value -= min_val; + dst_reg->min_value -= max_val; if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value -= max_val; + dst_reg->max_value -= min_val; dst_reg->min_align = min(src_align, dst_align); break; case BPF_MUL: @@ -1953,6 +2045,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) regs[insn->dst_reg].max_value = insn->imm; regs[insn->dst_reg].min_value = insn->imm; regs[insn->dst_reg].min_align = calc_align(insn->imm); + regs[insn->dst_reg].value_from_signed = false; } } else if (opcode > BPF_END) { @@ -2128,40 +2221,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { + bool value_from_signed = true; + bool is_range = true; + switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ true_reg->max_value = true_reg->min_value = val; + is_range = false; break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ false_reg->max_value = false_reg->min_value = val; + is_range = false; break; case BPF_JGT: - /* Unsigned comparison, the minimum value is 0. */ - false_reg->min_value = 0; + value_from_signed = false; /* fallthrough */ case BPF_JSGT: + if (true_reg->value_from_signed != value_from_signed) + reset_reg_range_values(true_reg, 0); + if (false_reg->value_from_signed != value_from_signed) + reset_reg_range_values(false_reg, 0); + if (opcode == BPF_JGT) { + /* Unsigned comparison, the minimum value is 0. */ + false_reg->min_value = 0; + } /* If this is false then we know the maximum val is val, * otherwise we know the min val is val+1. */ false_reg->max_value = val; + false_reg->value_from_signed = value_from_signed; true_reg->min_value = val + 1; + true_reg->value_from_signed = value_from_signed; break; case BPF_JGE: - /* Unsigned comparison, the minimum value is 0. */ - false_reg->min_value = 0; + value_from_signed = false; /* fallthrough */ case BPF_JSGE: + if (true_reg->value_from_signed != value_from_signed) + reset_reg_range_values(true_reg, 0); + if (false_reg->value_from_signed != value_from_signed) + reset_reg_range_values(false_reg, 0); + if (opcode == BPF_JGE) { + /* Unsigned comparison, the minimum value is 0. */ + false_reg->min_value = 0; + } /* If this is false then we know the maximum value is val - 1, * otherwise we know the minimum value is val.
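+ *
+ * Concrete case (editorial illustration): for a guard such as
+ * "if r1 >= 7 goto L", the taken branch learns r1_min = 7 and the
+ * fall-through branch learns r1_max = 6, which is exactly the
+ * val / val - 1 pair assigned below.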
*/ false_reg->max_value = val - 1; + false_reg->value_from_signed = value_from_signed; true_reg->min_value = val; + true_reg->value_from_signed = value_from_signed; break; default: break; @@ -2169,6 +2285,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, check_reg_overflow(false_reg); check_reg_overflow(true_reg); + if (is_range) { + if (__is_pointer_value(false, false_reg)) + reset_reg_range_values(false_reg, 0); + if (__is_pointer_value(false, true_reg)) + reset_reg_range_values(true_reg, 0); + } } /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg @@ -2178,41 +2300,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { + bool value_from_signed = true; + bool is_range = true; + switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ true_reg->max_value = true_reg->min_value = val; + is_range = false; break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ false_reg->max_value = false_reg->min_value = val; + is_range = false; break; case BPF_JGT: - /* Unsigned comparison, the minimum value is 0. */ - true_reg->min_value = 0; + value_from_signed = false; /* fallthrough */ case BPF_JSGT: + if (true_reg->value_from_signed != value_from_signed) + reset_reg_range_values(true_reg, 0); + if (false_reg->value_from_signed != value_from_signed) + reset_reg_range_values(false_reg, 0); + if (opcode == BPF_JGT) { + /* Unsigned comparison, the minimum value is 0. */ + true_reg->min_value = 0; + } /* * If this is false, then the val is <= the register, if it is * true the register <= to the val. */ false_reg->min_value = val; + false_reg->value_from_signed = value_from_signed; true_reg->max_value = val - 1; + true_reg->value_from_signed = value_from_signed; break; case BPF_JGE: - /* Unsigned comparison, the minimum value is 0. */ - true_reg->min_value = 0; + value_from_signed = false; /* fallthrough */ case BPF_JSGE: + if (true_reg->value_from_signed != value_from_signed) + reset_reg_range_values(true_reg, 0); + if (false_reg->value_from_signed != value_from_signed) + reset_reg_range_values(false_reg, 0); + if (opcode == BPF_JGE) { + /* Unsigned comparison, the minimum value is 0. */ + true_reg->min_value = 0; + } /* If this is false then constant < register, if it is true then * the register < constant. 
*/ false_reg->min_value = val + 1; + false_reg->value_from_signed = value_from_signed; true_reg->max_value = val; + true_reg->value_from_signed = value_from_signed; break; default: break; @@ -2220,6 +2365,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, check_reg_overflow(false_reg); check_reg_overflow(true_reg); + if (is_range) { + if (__is_pointer_value(false, false_reg)) + reset_reg_range_values(false_reg, 0); + if (__is_pointer_value(false, true_reg)) + reset_reg_range_values(true_reg, 0); + } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 00f4d6bf048f..7a01568e5e22 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -33,6 +33,9 @@ struct cgroup_taskset { struct list_head src_csets; struct list_head dst_csets; + /* the number of tasks in the set */ + int nr_tasks; + /* the subsys currently being processed */ int ssid; diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 8d4e85eae42c..2c62e4b3f198 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1948,6 +1948,8 @@ static void cgroup_migrate_add_task(struct task_struct *task, if (!cset->mg_src_cgrp) return; + mgctx->tset.nr_tasks++; + list_move_tail(&task->cg_list, &cset->mg_tasks); if (list_empty(&cset->mg_node)) list_add_tail(&cset->mg_node, @@ -2036,21 +2038,19 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) struct css_set *cset, *tmp_cset; int ssid, failed_ssid, ret; - /* methods shouldn't be called if no task is actually migrating */ - if (list_empty(&tset->src_csets)) - return 0; - /* check that we can legitimately attach to the cgroup */ - do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { - if (ss->can_attach) { - tset->ssid = ssid; - ret = ss->can_attach(tset); - if (ret) { - failed_ssid = ssid; - goto out_cancel_attach; + if (tset->nr_tasks) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { + if (ss->can_attach) { + tset->ssid = ssid; + ret = ss->can_attach(tset); + if (ret) { + failed_ssid = ssid; + goto out_cancel_attach; + } } - } - } while_each_subsys_mask(); + } while_each_subsys_mask(); + } /* * Now that we're guaranteed success, proceed to move all tasks to @@ -2077,25 +2077,29 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) */ tset->csets = &tset->dst_csets; - do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { - if (ss->attach) { - tset->ssid = ssid; - ss->attach(tset); - } - } while_each_subsys_mask(); + if (tset->nr_tasks) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { + if (ss->attach) { + tset->ssid = ssid; + ss->attach(tset); + } + } while_each_subsys_mask(); + } ret = 0; goto out_release_tset; out_cancel_attach: - do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { - if (ssid == failed_ssid) - break; - if (ss->cancel_attach) { - tset->ssid = ssid; - ss->cancel_attach(tset); - } - } while_each_subsys_mask(); + if (tset->nr_tasks) { + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { + if (ssid == failed_ssid) + break; + if (ss->cancel_attach) { + tset->ssid = ssid; + ss->cancel_attach(tset); + } + } while_each_subsys_mask(); + } out_release_tset: spin_lock_irq(&css_set_lock); list_splice_init(&tset->dst_csets, &tset->src_csets); @@ -2917,11 +2921,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, cgrp->subtree_control &= ~disable; ret = cgroup_apply_control(cgrp); - cgroup_finalize_control(cgrp, ret); + if (ret) + goto out_unlock; kernfs_activate(cgrp->kn); - ret 
= 0; out_unlock: cgroup_kn_unlock(of->kn); return ret ?: nbytes; @@ -4574,6 +4578,10 @@ int __init cgroup_init(void) if (ss->bind) ss->bind(init_css_set.subsys[ssid]); + + mutex_lock(&cgroup_mutex); + css_populate_dir(init_css_set.subsys[ssid]); + mutex_unlock(&cgroup_mutex); } /* init_css_set.subsys[] has been updated, re-hash */ diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index ae643412948a..8f26927f16a1 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -63,6 +63,7 @@ #include <linux/cgroup.h> #include <linux/wait.h> +DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); /* See "Frequency meter" comments, below. */ diff --git a/kernel/cpu.c b/kernel/cpu.c index cb5103413bd8..cea4d7f1885a 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -346,11 +346,26 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable); /* Notifier wrappers for transitioning to state machine */ +static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st); + static int bringup_wait_for_ap(unsigned int cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); + /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ wait_for_completion(&st->done); + if (WARN_ON_ONCE((!cpu_online(cpu)))) + return -ECANCELED; + + /* Unpark the stopper thread and the hotplug thread of the target cpu */ + stop_machine_unpark(cpu); + kthread_unpark(st->thread); + + /* Should we go further up ? */ + if (st->target > CPUHP_AP_ONLINE_IDLE) { + __cpuhp_kick_ap_work(st); + wait_for_completion(&st->done); + } return st->result; } @@ -371,9 +386,7 @@ static int bringup_cpu(unsigned int cpu) irq_unlock_sparse(); if (ret) return ret; - ret = bringup_wait_for_ap(cpu); - BUG_ON(!cpu_online(cpu)); - return ret; + return bringup_wait_for_ap(cpu); } /* @@ -859,31 +872,20 @@ void notify_cpu_starting(unsigned int cpu) } /* - * Called from the idle task. We need to set active here, so we can kick off - * the stopper thread and unpark the smpboot threads. If the target state is - * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the - * cpu further. + * Called from the idle task. Wake up the controlling task which brings the + * stopper and the hotplug thread of the upcoming CPU up and then delegates + * the rest of the online bringup to the hotplug thread. */ void cpuhp_online_idle(enum cpuhp_state state) { struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); - unsigned int cpu = smp_processor_id(); /* Happens for the boot cpu */ if (state != CPUHP_AP_ONLINE_IDLE) return; st->state = CPUHP_AP_ONLINE_IDLE; - - /* Unpark the stopper thread and the hotplug thread of this cpu */ - stop_machine_unpark(cpu); - kthread_unpark(st->thread); - - /* Should we go further up ? */ - if (st->target > CPUHP_AP_ONLINE_IDLE) - __cpuhp_kick_ap_work(st); - else - complete(&st->done); + complete(&st->done); } /* Requires cpu_add_remove_lock to be held */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 6c4e523dc1e2..51ecc01b78ff 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1456,6 +1456,13 @@ static enum event_type_t get_event_type(struct perf_event *event) lockdep_assert_held(&ctx->lock); + /* + * It's 'group type', really, because if our group leader is + * pinned, so are we. + */ + if (event->group_leader != event) + event = event->group_leader; + event_type = event->attr.pinned ? 
EVENT_PINNED : EVENT_FLEXIBLE; if (!ctx->task) event_type |= EVENT_CPU; @@ -5077,7 +5084,7 @@ static void perf_mmap_open(struct vm_area_struct *vma) atomic_inc(&event->rb->aux_mmap_count); if (event->pmu->event_mapped) - event->pmu->event_mapped(event); + event->pmu->event_mapped(event, vma->vm_mm); } static void perf_pmu_output_stop(struct perf_event *event); @@ -5100,7 +5107,7 @@ static void perf_mmap_close(struct vm_area_struct *vma) unsigned long size = perf_data_size(rb); if (event->pmu->event_unmapped) - event->pmu->event_unmapped(event); + event->pmu->event_unmapped(event, vma->vm_mm); /* * rb->aux_mmap_count will always drop before rb->mmap_count and @@ -5398,7 +5405,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_ops = &perf_mmap_vmops; if (event->pmu->event_mapped) - event->pmu->event_mapped(event); + event->pmu->event_mapped(event, vma->vm_mm); return ret; } @@ -7316,21 +7323,6 @@ int perf_event_account_interrupt(struct perf_event *event) return __perf_event_account_interrupt(event, 1); } -static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) -{ - /* - * Due to interrupt latency (AKA "skid"), we may enter the - * kernel before taking an overflow, even if the PMU is only - * counting user events. - * To avoid leaking information to userspace, we must always - * reject kernel samples when exclude_kernel is set. - */ - if (event->attr.exclude_kernel && !user_mode(regs)) - return false; - - return true; -} - /* * Generic event overflow handling, sampling. */ @@ -7351,12 +7343,6 @@ static int __perf_event_overflow(struct perf_event *event, ret = __perf_event_account_interrupt(event, throttle); - /* - * For security, drop the skid kernel samples if necessary. - */ - if (!sample_is_allowed(event, regs)) - return ret; - /* * XXX event_limit might not quite work as expected on inherited * events @@ -10010,28 +9996,27 @@ SYSCALL_DEFINE5(perf_event_open, goto err_context; /* - * Do not allow to attach to a group in a different - * task or CPU context: + * Make sure we're both events for the same CPU; + * grouping events for different CPUs is broken; since + * you can never concurrently schedule them anyhow. */ - if (move_group) { - /* - * Make sure we're both on the same task, or both - * per-cpu events. - */ - if (group_leader->ctx->task != ctx->task) - goto err_context; + if (group_leader->cpu != event->cpu) + goto err_context; - /* - * Make sure we're both events for the same CPU; - * grouping events for different CPUs is broken; since - * you can never concurrently schedule them anyhow. - */ - if (group_leader->cpu != event->cpu) - goto err_context; - } else { - if (group_leader->ctx != ctx) - goto err_context; - } + /* + * Make sure we're both on the same task, or both + * per-CPU events. + */ + if (group_leader->ctx->task != ctx->task) + goto err_context; + + /* + * Do not allow to attach to a group in a different task + * or CPU context. If we're moving SW events, we'll fix + * this up later, so allow that. 
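As a compact restatement of the reordered validation in this perf_event_open() hunk (including the !move_group check that completes just below), here is a standalone sketch; the struct layouts are simplified stand-ins, not the kernel's:

#include <stdbool.h>

struct toy_ctx   { int task; };
struct toy_event { int cpu; struct toy_ctx *ctx; };

/* CPU and task matching are now checked unconditionally; the context
 * identity check is relaxed only while software events are being moved
 * to a hardware context (move_group), which is fixed up later. */
static bool group_attach_ok(const struct toy_event *leader,
			    const struct toy_event *event,
			    const struct toy_ctx *ctx, bool move_group)
{
	if (leader->cpu != event->cpu)
		return false;	/* grouping across CPUs is broken */
	if (leader->ctx->task != ctx->task)
		return false;	/* same task, or both per-CPU */
	if (!move_group && leader->ctx != ctx)
		return false;	/* different context, no SW move pending */
	return true;
}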
+ */ + if (!move_group && group_leader->ctx != ctx) + goto err_context; /* * Only a group leader can be exclusive or pinned diff --git a/kernel/extable.c b/kernel/extable.c index 2676d7f8baf6..4efaf26d7def 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr) return 0; } -int core_kernel_text(unsigned long addr) +int notrace core_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)_etext) diff --git a/kernel/fork.c b/kernel/fork.c index e53770d2bf95..9a2b4b4f13b4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -205,19 +205,17 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) void *stack; int i; - local_irq_disable(); for (i = 0; i < NR_CACHED_STACKS; i++) { - struct vm_struct *s = this_cpu_read(cached_stacks[i]); + struct vm_struct *s; + + s = this_cpu_xchg(cached_stacks[i], NULL); if (!s) continue; - this_cpu_write(cached_stacks[i], NULL); tsk->stack_vm_area = s; - local_irq_enable(); return s->addr; } - local_irq_enable(); stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE, VMALLOC_START, VMALLOC_END, @@ -245,19 +243,15 @@ static inline void free_thread_stack(struct task_struct *tsk) { #ifdef CONFIG_VMAP_STACK if (task_stack_vm_area(tsk)) { - unsigned long flags; int i; - local_irq_save(flags); for (i = 0; i < NR_CACHED_STACKS; i++) { - if (this_cpu_read(cached_stacks[i])) + if (this_cpu_cmpxchg(cached_stacks[i], + NULL, tsk->stack_vm_area) != NULL) continue; - this_cpu_write(cached_stacks[i], tsk->stack_vm_area); - local_irq_restore(flags); return; } - local_irq_restore(flags); vfree_atomic(tsk->stack); return; @@ -808,6 +802,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); + RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_mm_init(mm); clear_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS @@ -1637,9 +1632,9 @@ static __latent_entropy struct task_struct *copy_process( prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - seqcount_init(&p->vtime_seqcount); - p->vtime_snap = 0; - p->vtime_snap_whence = VTIME_INACTIVE; + seqcount_init(&p->vtime.seqcount); + p->vtime.starttime = 0; + p->vtime.state = VTIME_INACTIVE; #endif #if defined(SPLIT_RSS_COUNTING) diff --git a/kernel/futex.c b/kernel/futex.c index 357348a6cf6b..bb8b5a9fcdd5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -670,13 +670,14 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) * this reference was taken by ihold under the page lock * pinning the inode in place so i_lock was unnecessary. The * only way for this check to fail is if the inode was - * truncated in parallel so warn for now if this happens. + * truncated in parallel which is almost certainly an + * application bug. In such a case, just retry. * * We are not calling into get_futex_key_refs() in file-backed * cases, therefore a successful atomic_inc return below will * guarantee that get_futex_key() will still imply smp_mb(); (B). 
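The futex fix above replaces a WARN with a plain failure so the caller retries the lookup. A userspace analogue of the underlying atomic_inc_not_zero() idiom, written with C11 atomics purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live; a zero refcount
 * means it is already being torn down, so the caller must redo the
 * whole lookup instead of warning about it. */
static bool get_ref_if_live(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	do {
		if (old == 0)
			return false;	/* dying object: retry the lookup */
	} while (!atomic_compare_exchange_weak(refcount, &old, old + 1));

	return true;
}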
*/ - if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { + if (!atomic_inc_not_zero(&inode->i_count)) { rcu_read_unlock(); put_page(page); diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index e2d356dd7581..9b71406d2eec 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -66,6 +66,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) struct cpumask *masks; cpumask_var_t nmsk; + /* + * If there aren't any vectors left after applying the pre/post + * vectors don't bother with assigning affinity. + */ + if (!affv) + return NULL; + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) return NULL; @@ -140,15 +147,19 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) /** * irq_calc_affinity_vectors - Calculate the optimal number of vectors + * @minvec: The minimum number of vectors available * @maxvec: The maximum number of vectors available * @affd: Description of the affinity requirements */ -int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd) +int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) { int resv = affd->pre_vectors + affd->post_vectors; int vecs = maxvec - resv; int cpus; + if (resv > minvec) + return 0; + /* Stabilize the cpumasks */ get_online_cpus(); cpus = cpumask_weight(cpu_online_mask); diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index c94da688ee9b..cdf94ce959d8 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -898,13 +898,15 @@ EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { - unsigned long flags; + unsigned long flags, trigger, tmp; struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); if (!desc) return; irq_settings_clr_and_set(desc, clr, set); + trigger = irqd_get_trigger_type(&desc->irq_data); + irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); if (irq_settings_has_no_balance_set(desc)) @@ -916,7 +918,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) if (irq_settings_is_level(desc)) irqd_set(&desc->irq_data, IRQD_LEVEL); - irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); + tmp = irq_settings_get_trigger_mask(desc); + if (tmp != IRQ_TYPE_NONE) + trigger = tmp; + + irqd_set(&desc->irq_data, trigger); irq_put_desc_unlock(desc, flags); } diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index 1a9abc1c8ea0..259a22aa9934 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c @@ -165,7 +165,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu) struct irq_data *data = irq_get_irq_data(irq); struct cpumask *ipimask = data ? 
irq_data_get_affinity_mask(data) : NULL; - if (!data || !ipimask || cpu > nr_cpu_ids) + if (!data || !ipimask || cpu >= nr_cpu_ids) return INVALID_HWIRQ; if (!cpumask_test_cpu(cpu, ipimask)) @@ -195,7 +195,7 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data, if (!chip->ipi_send_single && !chip->ipi_send_mask) return -EINVAL; - if (cpu > nr_cpu_ids) + if (cpu >= nr_cpu_ids) return -EINVAL; if (dest) { diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index c65f7989f850..20819df98125 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c @@ -231,8 +231,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) out_nolock: list_del(&waiter.list); - if (!list_empty(&sem->wait_list)) - __rwsem_do_wake(sem, 1); + if (!list_empty(&sem->wait_list) && sem->count >= 0) + __rwsem_do_wake(sem, 0); raw_spin_unlock_irqrestore(&sem->wait_lock, flags); return -EINTR; diff --git a/kernel/pid.c b/kernel/pid.c index fd1cde1e4576..eeb892e728f8 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -527,8 +527,11 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, if (!ns) ns = task_active_pid_ns(current); if (likely(pid_alive(task))) { - if (type != PIDTYPE_PID) + if (type != PIDTYPE_PID) { + if (type == __PIDTYPE_TGID) + type = PIDTYPE_PID; task = task->group_leader; + } nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); } rcu_read_unlock(); @@ -537,12 +540,6 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, } EXPORT_SYMBOL(__task_pid_nr_ns); -pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) -{ - return pid_nr_ns(task_tgid(tsk), ns); -} -EXPORT_SYMBOL(task_tgid_nr_ns); - struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) { return ns_of_pid(task_pid(tsk)); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c9a48657512a..c7a343da4c37 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1769,6 +1769,7 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { /* Prior smp_mb__after_atomic() orders against prior enqueue. */ WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); + smp_mb(); /* ->nocb_leader_sleep before swake_up(). */ swake_up(&rdp_leader->nocb_wq); } } @@ -2023,6 +2024,7 @@ static void nocb_leader_wait(struct rcu_data *my_rdp) * nocb_gp_head, where they await a grace period. */ gotcbs = false; + smp_mb(); /* wakeup before ->nocb_head reads. 
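A userspace sketch, in C11 atomics, of the ordering the two smp_mb() additions above are meant to guarantee; the names and the fence-based rendering are illustrative, not the kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool leader_sleep;
static atomic_long nocb_head;	/* stand-in for the callback list head */

static void follower_enqueue(long cb)
{
	atomic_store_explicit(&nocb_head, cb, memory_order_relaxed);
	atomic_store_explicit(&leader_sleep, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() before wakeup */
	/* the swake_up() equivalent goes here */
}

static long leader_scan(void)
{
	/* runs after the leader has been woken */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() before ->nocb_head reads */
	return atomic_load_explicit(&nocb_head, memory_order_relaxed);
}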
*/ for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head); if (!rdp->nocb_gp_head) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index aea3135c5d90..ad275b25e3f9 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -679,20 +679,21 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -static u64 vtime_delta(struct task_struct *tsk) +static u64 vtime_delta(struct vtime *vtime) { - unsigned long now = READ_ONCE(jiffies); + unsigned long long clock; - if (time_before(now, (unsigned long)tsk->vtime_snap)) + clock = sched_clock(); + if (clock < vtime->starttime) return 0; - return jiffies_to_nsecs(now - tsk->vtime_snap); + return clock - vtime->starttime; } -static u64 get_vtime_delta(struct task_struct *tsk) +static u64 get_vtime_delta(struct vtime *vtime) { - unsigned long now = READ_ONCE(jiffies); - u64 delta, other; + u64 delta = vtime_delta(vtime); + u64 other; /* * Unlike tick based timing, vtime based timing never has lost @@ -701,104 +702,138 @@ static u64 get_vtime_delta(struct task_struct *tsk) * elapsed time. Limit account_other_time to prevent rounding * errors from causing elapsed vtime to go negative. */ - delta = jiffies_to_nsecs(now - tsk->vtime_snap); other = account_other_time(delta); - WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); - tsk->vtime_snap = now; + WARN_ON_ONCE(vtime->state == VTIME_INACTIVE); + vtime->starttime += delta; return delta - other; } -static void __vtime_account_system(struct task_struct *tsk) +static void __vtime_account_system(struct task_struct *tsk, + struct vtime *vtime) { - account_system_time(tsk, irq_count(), get_vtime_delta(tsk)); + vtime->stime += get_vtime_delta(vtime); + if (vtime->stime >= TICK_NSEC) { + account_system_time(tsk, irq_count(), vtime->stime); + vtime->stime = 0; + } +} + +static void vtime_account_guest(struct task_struct *tsk, + struct vtime *vtime) +{ + vtime->gtime += get_vtime_delta(vtime); + if (vtime->gtime >= TICK_NSEC) { + account_guest_time(tsk, vtime->gtime); + vtime->gtime = 0; + } } void vtime_account_system(struct task_struct *tsk) { - if (!vtime_delta(tsk)) + struct vtime *vtime = &tsk->vtime; + + if (!vtime_delta(vtime)) return; - write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); - write_seqcount_end(&tsk->vtime_seqcount); + write_seqcount_begin(&vtime->seqcount); + /* We might have scheduled out from guest path */ + if (current->flags & PF_VCPU) + vtime_account_guest(tsk, vtime); + else + __vtime_account_system(tsk, vtime); + write_seqcount_end(&vtime->seqcount); } -void vtime_account_user(struct task_struct *tsk) +void vtime_user_enter(struct task_struct *tsk) { - write_seqcount_begin(&tsk->vtime_seqcount); - tsk->vtime_snap_whence = VTIME_SYS; - if (vtime_delta(tsk)) - account_user_time(tsk, get_vtime_delta(tsk)); - write_seqcount_end(&tsk->vtime_seqcount); + struct vtime *vtime = &tsk->vtime; + + write_seqcount_begin(&vtime->seqcount); + __vtime_account_system(tsk, vtime); + vtime->state = VTIME_USER; + write_seqcount_end(&vtime->seqcount); } -void vtime_user_enter(struct task_struct *tsk) +void vtime_user_exit(struct task_struct *tsk) { - write_seqcount_begin(&tsk->vtime_seqcount); - if (vtime_delta(tsk)) - __vtime_account_system(tsk); - tsk->vtime_snap_whence = VTIME_USER; - write_seqcount_end(&tsk->vtime_seqcount); + struct vtime *vtime = &tsk->vtime; + + 
write_seqcount_begin(&vtime->seqcount); + vtime->utime += get_vtime_delta(vtime); + if (vtime->utime >= TICK_NSEC) { + account_user_time(tsk, vtime->utime); + vtime->utime = 0; + } + vtime->state = VTIME_SYS; + write_seqcount_end(&vtime->seqcount); } void vtime_guest_enter(struct task_struct *tsk) { + struct vtime *vtime = &tsk->vtime; /* * The flags must be updated under the lock with - * the vtime_snap flush and update. + * the vtime_starttime flush and update. * That enforces a right ordering and update sequence * synchronization against the reader (task_gtime()) * that can thus safely catch up with a tickless delta. */ - write_seqcount_begin(&tsk->vtime_seqcount); - if (vtime_delta(tsk)) - __vtime_account_system(tsk); + write_seqcount_begin(&vtime->seqcount); + __vtime_account_system(tsk, vtime); current->flags |= PF_VCPU; - write_seqcount_end(&tsk->vtime_seqcount); + write_seqcount_end(&vtime->seqcount); } EXPORT_SYMBOL_GPL(vtime_guest_enter); void vtime_guest_exit(struct task_struct *tsk) { - write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); + struct vtime *vtime = &tsk->vtime; + + write_seqcount_begin(&vtime->seqcount); + vtime_account_guest(tsk, vtime); current->flags &= ~PF_VCPU; - write_seqcount_end(&tsk->vtime_seqcount); + write_seqcount_end(&vtime->seqcount); } EXPORT_SYMBOL_GPL(vtime_guest_exit); void vtime_account_idle(struct task_struct *tsk) { - account_idle_time(get_vtime_delta(tsk)); + account_idle_time(get_vtime_delta(&tsk->vtime)); } void arch_vtime_task_switch(struct task_struct *prev) { - write_seqcount_begin(&prev->vtime_seqcount); - prev->vtime_snap_whence = VTIME_INACTIVE; - write_seqcount_end(&prev->vtime_seqcount); + struct vtime *vtime = &prev->vtime; - write_seqcount_begin(¤t->vtime_seqcount); - current->vtime_snap_whence = VTIME_SYS; - current->vtime_snap = jiffies; - write_seqcount_end(¤t->vtime_seqcount); + write_seqcount_begin(&vtime->seqcount); + vtime->state = VTIME_INACTIVE; + write_seqcount_end(&vtime->seqcount); + + vtime = ¤t->vtime; + + write_seqcount_begin(&vtime->seqcount); + vtime->state = VTIME_SYS; + vtime->starttime = sched_clock(); + write_seqcount_end(&vtime->seqcount); } void vtime_init_idle(struct task_struct *t, int cpu) { + struct vtime *vtime = &t->vtime; unsigned long flags; local_irq_save(flags); - write_seqcount_begin(&t->vtime_seqcount); - t->vtime_snap_whence = VTIME_SYS; - t->vtime_snap = jiffies; - write_seqcount_end(&t->vtime_seqcount); + write_seqcount_begin(&vtime->seqcount); + vtime->state = VTIME_SYS; + vtime->starttime = sched_clock(); + write_seqcount_end(&vtime->seqcount); local_irq_restore(flags); } u64 task_gtime(struct task_struct *t) { + struct vtime *vtime = &t->vtime; unsigned int seq; u64 gtime; @@ -806,13 +841,13 @@ u64 task_gtime(struct task_struct *t) return t->gtime; do { - seq = read_seqcount_begin(&t->vtime_seqcount); + seq = read_seqcount_begin(&vtime->seqcount); gtime = t->gtime; - if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU) - gtime += vtime_delta(t); + if (vtime->state == VTIME_SYS && t->flags & PF_VCPU) + gtime += vtime->gtime + vtime_delta(vtime); - } while (read_seqcount_retry(&t->vtime_seqcount, seq)); + } while (read_seqcount_retry(&vtime->seqcount, seq)); return gtime; } @@ -824,8 +859,9 @@ u64 task_gtime(struct task_struct *t) */ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime) { - u64 delta; + struct vtime *vtime = &t->vtime; unsigned int seq; + u64 delta; if (!vtime_accounting_enabled()) { *utime = t->utime; @@ -834,25 +870,25 @@ void 
task_cputime(struct task_struct *t, u64 *utime, u64 *stime) } do { - seq = read_seqcount_begin(&t->vtime_seqcount); + seq = read_seqcount_begin(&vtime->seqcount); *utime = t->utime; *stime = t->stime; /* Task is sleeping, nothing to add */ - if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t)) + if (vtime->state == VTIME_INACTIVE || is_idle_task(t)) continue; - delta = vtime_delta(t); + delta = vtime_delta(vtime); /* * Task runs either in user or kernel space, add pending nohz time to * the right place. */ - if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) - *utime += delta; - else if (t->vtime_snap_whence == VTIME_SYS) - *stime += delta; - } while (read_seqcount_retry(&t->vtime_seqcount, seq)); + if (vtime->state == VTIME_USER || t->flags & PF_VCPU) + *utime += vtime->utime + delta; + else if (vtime->state == VTIME_SYS) + *stime += vtime->stime + delta; + } while (read_seqcount_retry(&vtime->seqcount, seq)); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c77e4b1d51c0..5f8c91bdac3c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1381,7 +1381,6 @@ static unsigned long weighted_cpuload(const int cpu); static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); static unsigned long capacity_of(int cpu); -static long effective_load(struct task_group *tg, int cpu, long wl, long wg); /* Cached statistics for all CPUs within a node */ struct numa_stats { @@ -2469,7 +2468,8 @@ void task_numa_work(struct callback_head *work) return; - down_read(&mm->mmap_sem); + if (!down_read_trylock(&mm->mmap_sem)) + return; vma = find_vma(mm, start); if (!vma) { reset_ptenuma_scan(p); @@ -2584,6 +2584,60 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr) } } } + +/* + * Can a task be moved from prev_cpu to this_cpu without causing a load + * imbalance that would trigger the load balancer? + */ +static inline bool numa_wake_affine(struct sched_domain *sd, + struct task_struct *p, int this_cpu, + int prev_cpu, int sync) +{ + struct numa_stats prev_load, this_load; + s64 this_eff_load, prev_eff_load; + + update_numa_stats(&prev_load, cpu_to_node(prev_cpu)); + update_numa_stats(&this_load, cpu_to_node(this_cpu)); + + /* + * If sync wakeup then subtract the (maximum possible) + * effect of the currently running task from the load + * of the current CPU: + */ + if (sync) { + unsigned long current_load = task_h_load(current); + + if (this_load.load > current_load) + this_load.load -= current_load; + else + this_load.load = 0; + } + + /* + * In low-load situations, where this_cpu's node is idle due to the + * sync cause above having dropped this_load.load to 0, move the task. + * Moving to an idle socket will not create a bad imbalance. + * + * Otherwise check if the nodes are near enough in load to allow this + * task to be woken on this_cpu's node. 
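Returning to the vtime conversion earlier in this hunk: task_gtime() and task_cputime() depend on the seqcount retry pattern. A minimal userspace rendition, with C11 atomics standing in for seqcount_t:

#include <stdatomic.h>

struct toy_vtime {
	atomic_uint seq;		/* odd while a writer is inside */
	unsigned long long starttime;
};

static unsigned long long read_starttime(struct toy_vtime *v)
{
	unsigned int seq;
	unsigned long long val;

	do {
		while ((seq = atomic_load(&v->seq)) & 1)
			;			/* writer active: wait */
		val = v->starttime;
	} while (atomic_load(&v->seq) != seq);	/* raced with a writer: retry */

	return val;
}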
+ */ + if (this_load.load > 0) { + unsigned long task_load = task_h_load(p); + + this_eff_load = 100; + this_eff_load *= prev_load.compute_capacity; + + prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; + prev_eff_load *= this_load.compute_capacity; + + this_eff_load *= this_load.load + task_load; + prev_eff_load *= prev_load.load - task_load; + + return this_eff_load <= prev_eff_load; + } + + return true; +} #else static void task_tick_numa(struct rq *rq, struct task_struct *curr) { @@ -2596,6 +2650,15 @@ static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) { } + +#ifdef CONFIG_SMP +static inline bool numa_wake_affine(struct sched_domain *sd, + struct task_struct *p, int this_cpu, + int prev_cpu, int sync) +{ + return true; +} +#endif /* !SMP */ #endif /* CONFIG_NUMA_BALANCING */ static void @@ -2982,8 +3045,7 @@ __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq) * differential update where we store the last value we propagated. This in * turn allows skipping updates if the differential is 'small'. * - * Updating tg's load_avg is necessary before update_cfs_share() (which is - * done) and effective_load() (which is not done because it is too costly). + * Updating tg's load_avg is necessary before update_cfs_share(). */ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) { @@ -5215,126 +5277,6 @@ static unsigned long cpu_avg_load_per_task(int cpu) return 0; } -#ifdef CONFIG_FAIR_GROUP_SCHED -/* - * effective_load() calculates the load change as seen from the root_task_group - * - * Adding load to a group doesn't make a group heavier, but can cause movement - * of group shares between cpus. Assuming the shares were perfectly aligned one - * can calculate the shift in shares. - * - * Calculate the effective load difference if @wl is added (subtracted) to @tg - * on this @cpu and results in a total addition (subtraction) of @wg to the - * total group weight. - * - * Given a runqueue weight distribution (rw_i) we can compute a shares - * distribution (s_i) using: - * - * s_i = rw_i / \Sum rw_j (1) - * - * Suppose we have 4 CPUs and our @tg is a direct child of the root group and - * has 7 equal weight tasks, distributed as below (rw_i), with the resulting - * shares distribution (s_i): - * - * rw_i = { 2, 4, 1, 0 } - * s_i = { 2/7, 4/7, 1/7, 0 } - * - * As per wake_affine() we're interested in the load of two CPUs (the CPU the - * task used to run on and the CPU the waker is running on), we need to - * compute the effect of waking a task on either CPU and, in case of a sync - * wakeup, compute the effect of the current task going to sleep. - * - * So for a change of @wl to the local @cpu with an overall group weight change - * of @wl we can compute the new shares distribution (s'_i) using: - * - * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2) - * - * Suppose we're interested in CPUs 0 and 1, and want to compute the load - * differences in waking a task to CPU 0. The additional task changes the - * weight and shares distributions like: - * - * rw'_i = { 3, 4, 1, 0 } - * s'_i = { 3/8, 4/8, 1/8, 0 } - * - * We can then compute the difference in effective weight by using: - * - * dw_i = S * (s'_i - s_i) (3) - * - * Where 'S' is the group weight as seen by its parent. - * - * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7) - * times the weight of the group. 
The effect on CPU 1 would be -4/56 (4/8 - - * 4/7) times the weight of the group. - */ -static long effective_load(struct task_group *tg, int cpu, long wl, long wg) -{ - struct sched_entity *se = tg->se[cpu]; - - if (!tg->parent) /* the trivial, non-cgroup case */ - return wl; - - for_each_sched_entity(se) { - struct cfs_rq *cfs_rq = se->my_q; - long W, w = cfs_rq_load_avg(cfs_rq); - - tg = cfs_rq->tg; - - /* - * W = @wg + \Sum rw_j - */ - W = wg + atomic_long_read(&tg->load_avg); - - /* Ensure \Sum rw_j >= rw_i */ - W -= cfs_rq->tg_load_avg_contrib; - W += w; - - /* - * w = rw_i + @wl - */ - w += wl; - - /* - * wl = S * s'_i; see (2) - */ - if (W > 0 && w < W) - wl = (w * (long)scale_load_down(tg->shares)) / W; - else - wl = scale_load_down(tg->shares); - - /* - * Per the above, wl is the new se->load.weight value; since - * those are clipped to [MIN_SHARES, ...) do so now. See - * calc_cfs_shares(). - */ - if (wl < MIN_SHARES) - wl = MIN_SHARES; - - /* - * wl = dw_i = S * (s'_i - s_i); see (3) - */ - wl -= se->avg.load_avg; - - /* - * Recursively apply this logic to all parent groups to compute - * the final effective load change on the root group. Since - * only the @tg group gets extra weight, all parent groups can - * only redistribute existing shares. @wl is the shift in shares - * resulting from this level per the above. - */ - wg = 0; - } - - return wl; -} -#else - -static long effective_load(struct task_group *tg, int cpu, long wl, long wg) -{ - return wl; -} - -#endif - static void record_wakee(struct task_struct *p) { /* @@ -5385,67 +5327,25 @@ static int wake_wide(struct task_struct *p) static int wake_affine(struct sched_domain *sd, struct task_struct *p, int prev_cpu, int sync) { - s64 this_load, load; - s64 this_eff_load, prev_eff_load; - int idx, this_cpu; - struct task_group *tg; - unsigned long weight; - int balanced; - - idx = sd->wake_idx; - this_cpu = smp_processor_id(); - load = source_load(prev_cpu, idx); - this_load = target_load(this_cpu, idx); + int this_cpu = smp_processor_id(); + bool affine = false; /* - * If sync wakeup then subtract the (maximum possible) - * effect of the currently running task from the load - * of the current CPU: - */ - if (sync) { - tg = task_group(current); - weight = current->se.avg.load_avg; - - this_load += effective_load(tg, this_cpu, -weight, -weight); - load += effective_load(tg, prev_cpu, 0, -weight); - } - - tg = task_group(p); - weight = p->se.avg.load_avg; - - /* - * In low-load situations, where prev_cpu is idle and this_cpu is idle - * due to the sync cause above having dropped this_load to 0, we'll - * always have an imbalance, but there's really nothing you can do - * about that, so that's good too. - * - * Otherwise check if either cpus are near enough in load to allow this - * task to be woken on this_cpu. 
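For contrast with the effective_load() arithmetic being removed here, the comparison numa_wake_affine() performs above can be exercised standalone; every number below is invented for illustration:

#include <stdbool.h>
#include <stdio.h>

static bool numa_affine_ok(long this_load, long prev_load, long task_load,
			   long this_cap, long prev_cap, int imbalance_pct)
{
	/* loads cross-multiplied by the other node's capacity, with the
	 * imbalance_pct margin granted to the source side */
	long this_eff = 100 * prev_cap * (this_load + task_load);
	long prev_eff = (100 + (imbalance_pct - 100) / 2) * this_cap *
			(prev_load - task_load);
	return this_eff <= prev_eff;
}

int main(void)
{
	/* e.g. imbalance_pct = 117: moving a 100-unit task is allowed */
	printf("%d\n", numa_affine_ok(400, 600, 100, 1024, 1024, 117));
	return 0;
}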
+ * Common case: CPUs are in the same socket, and select_idle_sibling() + * will do its thing regardless of what we return: */ - this_eff_load = 100; - this_eff_load *= capacity_of(prev_cpu); - - prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; - prev_eff_load *= capacity_of(this_cpu); - - if (this_load > 0) { - this_eff_load *= this_load + - effective_load(tg, this_cpu, weight, weight); - - prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight); - } - - balanced = this_eff_load <= prev_eff_load; + if (cpus_share_cache(prev_cpu, this_cpu)) + affine = true; + else + affine = numa_wake_affine(sd, p, this_cpu, prev_cpu, sync); schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); + if (affine) { + schedstat_inc(sd->ttwu_move_affine); + schedstat_inc(p->se.statistics.nr_wakeups_affine); + } - if (!balanced) - return 0; - - schedstat_inc(sd->ttwu_move_affine); - schedstat_inc(p->se.statistics.nr_wakeups_affine); - - return 1; + return affine; } static inline int task_util(struct task_struct *p); @@ -5640,43 +5540,6 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; } -/* - * Implement a for_each_cpu() variant that starts the scan at a given cpu - * (@start), and wraps around. - * - * This is used to scan for idle CPUs; such that not all CPUs looking for an - * idle CPU find the same CPU. The down-side is that tasks tend to cycle - * through the LLC domain. - * - * Especially tbench is found sensitive to this. - */ - -static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped) -{ - int next; - -again: - next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1); - - if (*wrapped) { - if (next >= start) - return nr_cpumask_bits; - } else { - if (next >= nr_cpumask_bits) { - *wrapped = 1; - n = -1; - goto again; - } - } - - return next; -} - -#define for_each_cpu_wrap(cpu, mask, start, wrap) \ - for ((wrap) = 0, (cpu) = (start)-1; \ - (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \ - (cpu) < nr_cpumask_bits; ) - #ifdef CONFIG_SCHED_SMT static inline void set_idle_cores(int cpu, int val) @@ -5736,7 +5599,7 @@ void __update_idle_core(struct rq *rq) static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) { struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); - int core, cpu, wrap; + int core, cpu; if (!static_branch_likely(&sched_smt_present)) return -1; @@ -5746,7 +5609,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed); - for_each_cpu_wrap(core, cpus, target, wrap) { + for_each_cpu_wrap(core, cpus, target) { bool idle = true; for_each_cpu(cpu, cpu_smt_mask(core)) { @@ -5809,27 +5672,38 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) { struct sched_domain *this_sd; - u64 avg_cost, avg_idle = this_rq()->avg_idle; + u64 avg_cost, avg_idle; u64 time, cost; s64 delta; - int cpu, wrap; + int cpu, nr = INT_MAX; this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); if (!this_sd) return -1; - avg_cost = this_sd->avg_scan_cost; - /* * Due to large variance we need a large fuzz factor; hackbench in * particularly is sensitive here. 
*/ - if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost) + avg_idle = this_rq()->avg_idle / 512; + avg_cost = this_sd->avg_scan_cost + 1; + + if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) return -1; + if (sched_feat(SIS_PROP)) { + u64 span_avg = sd->span_weight * avg_idle; + if (span_avg > 4*avg_cost) + nr = div_u64(span_avg, avg_cost); + else + nr = 4; + } + time = local_clock(); - for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) { + for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { + if (!--nr) + return -1; if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; if (idle_cpu(cpu)) @@ -6011,11 +5885,15 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (affine_sd) { sd = NULL; /* Prefer wake_affine over balance flags */ - if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync)) + if (cpu == prev_cpu) + goto pick_cpu; + + if (wake_affine(affine_sd, p, prev_cpu, sync)) new_cpu = cpu; } if (!sd) { + pick_cpu: if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */ new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); @@ -6686,6 +6564,10 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) if (dst_nid == p->numa_preferred_nid) return 0; + /* Leaving a core idle is often worse than degrading locality. */ + if (env->idle != CPU_NOT_IDLE) + return -1; + if (numa_group) { src_faults = group_faults(p, src_nid); dst_faults = group_faults(p, dst_nid); @@ -6737,10 +6619,10 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) * our sched_group. We may want to revisit it if we couldn't * meet load balance goals by pulling other tasks on src_cpu. * - * Also avoid computing new_dst_cpu if we have already computed - * one in current iteration. + * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have + * already computed one in current iteration. */ - if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED)) + if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) return 0; /* Prevent to re-select dst_cpu via env's cpus */ @@ -8091,14 +7973,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, .tasks = LIST_HEAD_INIT(env.tasks), }; - /* - * For NEWLY_IDLE load_balancing, we don't need to consider - * other cpus in our group - */ - if (idle == CPU_NEWLY_IDLE) - env.dst_grpmask = NULL; - - cpumask_copy(cpus, cpu_active_mask); + cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); schedstat_inc(sd->lb_count[idle]); @@ -8220,7 +8095,15 @@ static int load_balance(int this_cpu, struct rq *this_rq, /* All tasks on this runqueue were pinned by CPU affinity */ if (unlikely(env.flags & LBF_ALL_PINNED)) { cpumask_clear_cpu(cpu_of(busiest), cpus); - if (!cpumask_empty(cpus)) { + /* + * Attempting to continue load balancing at the current + * sched_domain level only makes sense if there are + * active CPUs remaining as possible busiest CPUs to + * pull load from which are not contained within the + * destination group that is receiving any migrated + * load. + */ + if (!cpumask_subset(cpus, env.dst_grpmask)) { env.loop = 0; env.loop_break = sched_nr_migrate_break; goto redo; @@ -8516,6 +8399,13 @@ static int active_load_balance_cpu_stop(void *data) .src_cpu = busiest_rq->cpu, .src_rq = busiest_rq, .idle = CPU_IDLE, + /* + * can_migrate_task() doesn't need to compute new_dst_cpu + * for active balancing. Since we have CPU_IDLE, but no + * @dst_grpmask we need to make that test go away with lying + * about DST_PINNED. 
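The SIS_PROP scan budget introduced in select_idle_cpu() above can be reproduced in isolation; the inputs below are made-up values, not measurements:

#include <stdint.h>
#include <stdio.h>

static int sis_prop_budget(uint64_t avg_idle_ns, uint64_t avg_scan_cost_ns,
			   unsigned int span_weight)
{
	uint64_t avg_idle = avg_idle_ns / 512;		/* large fuzz factor */
	uint64_t avg_cost = avg_scan_cost_ns + 1;
	uint64_t span_avg = (uint64_t)span_weight * avg_idle;

	if (span_avg > 4 * avg_cost)
		return (int)(span_avg / avg_cost);
	return 4;					/* always scan a few */
}

int main(void)
{
	/* 200 us of recent idle, ~400 ns per scanned CPU, 32-CPU LLC */
	printf("scan budget: %d CPUs\n", sis_prop_budget(200000, 400, 32));
	return 0;
}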
+ */ + .flags = LBF_DST_PINNED, }; schedstat_inc(sd->alb_count); diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 11192e0cb122..ce7b4b6ac733 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -55,6 +55,7 @@ SCHED_FEAT(TTWU_QUEUE, true) * When doing wakeups, attempt to limit superfluous scans of the LLC domain. */ SCHED_FEAT(SIS_AVG_CPU, false) +SCHED_FEAT(SIS_PROP, true) /* * Issue a WARN when we do multiple update_rq_clock() calls diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1b0b4fb12837..f463b6b7b378 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -480,6 +480,9 @@ enum s_alloc { * Build an iteration mask that can exclude certain CPUs from the upwards * domain traversal. * + * Only CPUs that can arrive at this group should be considered to continue + * balancing. + * * Asymmetric node setups can result in situations where the domain tree is of * unequal depth, make sure to skip domains that already cover the entire * range. @@ -490,18 +493,31 @@ enum s_alloc { */ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) { - const struct cpumask *span = sched_domain_span(sd); + const struct cpumask *sg_span = sched_group_cpus(sg); struct sd_data *sdd = sd->private; struct sched_domain *sibling; int i; - for_each_cpu(i, span) { + for_each_cpu(i, sg_span) { sibling = *per_cpu_ptr(sdd->sd, i); - if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + + /* + * Can happen in the asymmetric case, where these siblings are + * unused. The mask will not be empty because those CPUs that + * do have the top domain _should_ span the domain. + */ + if (!sibling->child) + continue; + + /* If we would not end up here, we can't continue from here */ + if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) continue; cpumask_set_cpu(i, sched_group_mask(sg)); } + + /* We must not have empty masks here */ + WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg))); } /* @@ -525,7 +541,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) cpumask_clear(covered); - for_each_cpu(i, span) { + for_each_cpu_wrap(i, span, cpu) { struct cpumask *sg_span; if (cpumask_test_cpu(i, covered)) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index ee2f4202d82a..11e70a38497c 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -665,7 +665,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, * Rate limit to the tick as a hot fix to prevent DOS. Will be * mopped up later. */ - if (timr->it.alarm.interval < TICK_NSEC) + if (timr->it.alarm.interval && timr->it.alarm.interval < TICK_NSEC) timr->it.alarm.interval = TICK_NSEC; exp = timespec64_to_ktime(new_setting->it_value); diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 152a706ef8b8..36cec054b8ae 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -203,6 +203,7 @@ struct timer_base { bool migration_enabled; bool nohz_active; bool is_idle; + bool must_forward_clk; DECLARE_BITMAP(pending_map, WHEEL_SIZE); struct hlist_head vectors[WHEEL_SIZE]; } ____cacheline_aligned; @@ -856,13 +857,19 @@ get_target_base(struct timer_base *base, unsigned tflags) static inline void forward_timer_base(struct timer_base *base) { - unsigned long jnow = READ_ONCE(jiffies); + unsigned long jnow; /* - * We only forward the base when it's idle and we have a delta between - * base clock and jiffies. 
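The for_each_cpu_wrap() iteration adopted in build_overlap_sched_groups() above, and implemented in lib/cpumask.c later in this patch, can be modelled on a plain bitmask; this toy version only illustrates the wrap-around semantics:

#include <stdio.h>

#define NBITS 8

static int next_wrap(unsigned int mask, int n, int start, int *wrapped)
{
	for (int i = n + 1; ; i++) {
		if (*wrapped && i >= start)
			return NBITS;		/* came back around: done */
		if (i >= NBITS) {
			*wrapped = 1;
			i = -1;			/* restart from bit 0 */
			continue;
		}
		if (mask & (1u << i))
			return i;
	}
}

int main(void)
{
	int wrapped = 0, start = 5;

	/* mask 0xB6 has bits 1,2,4,5,7 set: starting at 5 visits 5 7 1 2 4,
	 * so concurrent scanners spread out instead of piling onto bit 0 */
	for (int cpu = next_wrap(0xB6, start - 1, start, &wrapped);
	     cpu < NBITS;
	     cpu = next_wrap(0xB6, cpu, start, &wrapped))
		printf("%d ", cpu);
	printf("\n");
	return 0;
}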
+ * We only forward the base when we are idle or have just come out of + * idle (must_forward_clk logic), and have a delta between base clock + * and jiffies. In the common case, run_timers will take care of it. */ - if (!base->is_idle || (long) (jnow - base->clk) < 2) + if (likely(!base->must_forward_clk)) + return; + + jnow = READ_ONCE(jiffies); + base->must_forward_clk = base->is_idle; + if ((long)(jnow - base->clk) < 2) return; /* @@ -938,6 +945,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) * same array bucket then just return: */ if (timer_pending(timer)) { + /* + * The downside of this optimization is that it can result in + * larger granularity than you would get from adding a new + * timer with this expiry. + */ if (timer->expires == expires) return 1; @@ -948,6 +960,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) * dequeue/enqueue dance. */ base = lock_timer_base(timer, &flags); + forward_timer_base(base); clk = base->clk; idx = calc_wheel_index(expires, clk); @@ -964,6 +977,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) } } else { base = lock_timer_base(timer, &flags); + forward_timer_base(base); } ret = detach_if_pending(timer, base, false); @@ -991,12 +1005,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) spin_lock(&base->lock); WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | base->cpu); + forward_timer_base(base); } } - /* Try to forward a stale timer base clock */ - forward_timer_base(base); - timer->expires = expires; /* * If 'idx' was calculated above and the base time did not advance @@ -1112,6 +1124,7 @@ void add_timer_on(struct timer_list *timer, int cpu) WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | cpu); } + forward_timer_base(base); debug_activate(timer, timer->expires); internal_add_timer(base, timer); @@ -1495,12 +1508,18 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) base->is_idle = false; } else { if (!is_max_delta) - expires = basem + (nextevt - basej) * TICK_NSEC; + expires = basem + (u64)(nextevt - basej) * TICK_NSEC; /* - * If we expect to sleep more than a tick, mark the base idle: + * If we expect to sleep more than a tick, mark the base idle. + * Also the tick is stopped so any added timer must forward + * the base clk itself to keep granularity small. This idle + * logic is only maintained for the BASE_STD base, deferrable + * timers may still see large granularity skew (by design). */ - if ((expires - basem) > TICK_NSEC) + if ((expires - basem) > TICK_NSEC) { + base->must_forward_clk = true; base->is_idle = true; + } } spin_unlock(&base->lock); @@ -1611,6 +1630,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) { struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); + /* + * must_forward_clk must be cleared before running timers so that any + * timer functions that call mod_timer will not try to forward the + * base. idle tracking / clock forwarding logic is only used with + * BASE_STD timers. + * + * The deferrable base does not do idle tracking at all, so we do + * not forward it. This can result in very large variations in + * granularity for deferrable timers, but they can be deferred for + * long periods due to idle.
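A userspace sketch of the must_forward_clk handshake these timer.c hunks build up; the struct is a stripped stand-in and the bucket checks of the real code are omitted:

#include <stdbool.h>

struct toy_base {
	unsigned long clk;
	bool is_idle;
	bool must_forward_clk;
};

/* Forward only when the flag armed at idle-entry is set; the softirq
 * clears it first, so mod_timer() from a timer callback is a no-op here. */
static void toy_forward(struct toy_base *base, unsigned long jnow)
{
	if (!base->must_forward_clk)
		return;
	base->must_forward_clk = base->is_idle;	/* stays armed only while idle */
	if ((long)(jnow - base->clk) < 2)
		return;
	base->clk = jnow;	/* real code also clamps to the next pending bucket */
}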
+ */ + base->must_forward_clk = false; + __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 460a031c77e5..d521b301dee9 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, fmt_cnt++; } - return __trace_printk(1/* fake ip will not be printed */, fmt, - mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1, - mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2, - mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3); +/* Horrid workaround for getting va_list handling working with different + * argument type combinations generically for 32 and 64 bit archs. + */ +#define __BPF_TP_EMIT() __BPF_ARG3_TP() +#define __BPF_TP(...) \ + __trace_printk(1 /* Fake ip will not be printed. */, \ + fmt, ##__VA_ARGS__) + +#define __BPF_ARG1_TP(...) \ + ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ + ? __BPF_TP(arg1, ##__VA_ARGS__) \ + : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \ + ? __BPF_TP((long)arg1, ##__VA_ARGS__) \ + : __BPF_TP((u32)arg1, ##__VA_ARGS__))) + +#define __BPF_ARG2_TP(...) \ + ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \ + ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \ + : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \ + ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \ + : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__))) + +#define __BPF_ARG3_TP(...) \ + ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \ + ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \ + : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \ + ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \ + : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__))) + + return __BPF_TP_EMIT(); } static const struct bpf_func_proto bpf_trace_printk_proto = { diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b308be30dfb9..a2bbce575e88 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -878,6 +878,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace) function_profile_call(trace->func, 0, NULL, NULL); + /* If function graph is shutting down, ret_stack can be NULL */ + if (!current->ret_stack) + return 0; + if (index >= 0 && index < FTRACE_RETFUNC_DEPTH) current->ret_stack[index].subtime = 0; @@ -3665,7 +3669,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) int exclude_mod = 0; int found = 0; int ret; - int clear_filter; + int clear_filter = 0; if (func) { func_g.type = filter_parse_regex(func, len, &func_g.search, diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 4ae268e687fe..912f62df0279 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); * the page that was allocated, with the read page of the buffer. * * Returns: - * The page allocated, or NULL on error. 
+ * The page allocated, or ERR_PTR */ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) { - struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_per_cpu *cpu_buffer; struct buffer_data_page *bpage = NULL; unsigned long flags; struct page *page; + if (!cpumask_test_cpu(cpu, buffer->cpumask)) + return ERR_PTR(-ENODEV); + + cpu_buffer = buffer->buffers[cpu]; local_irq_save(flags); arch_spin_lock(&cpu_buffer->lock); @@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) - return NULL; + return ERR_PTR(-ENOMEM); bpage = page_address(page); @@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); * * for example: * rpage = ring_buffer_alloc_read_page(buffer, cpu); - * if (!rpage) - * return error; + * if (IS_ERR(rpage)) + * return PTR_ERR(rpage); * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); * if (ret >= 0) * process_page(rpage, ret); diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 9fbcaf567886..68ee79afe31c 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c @@ -113,7 +113,7 @@ static enum event_status read_page(int cpu) int i; bpage = ring_buffer_alloc_read_page(buffer, cpu); - if (!bpage) + if (IS_ERR(bpage)) return EVENT_DROPPED; ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 091e801145c9..749a82c6a832 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6403,7 +6403,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; - ssize_t ret; + ssize_t ret = 0; ssize_t size; if (!count) @@ -6417,10 +6417,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer, iter->cpu_file); - info->spare_cpu = iter->cpu_file; + if (IS_ERR(info->spare)) { + ret = PTR_ERR(info->spare); + info->spare = NULL; + } else { + info->spare_cpu = iter->cpu_file; + } } if (!info->spare) - return -ENOMEM; + return ret; /* Do we have previous read data to read? 
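The ERR_PTR convention the ring buffer switches to above distinguishes failure causes in the pointer value itself. A self-contained userspace rendition (MAX_ERRNO and the helpers mirror the kernel's idiom, simplified):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_read_page(int cpu_valid)
{
	if (!cpu_valid)
		return ERR_PTR(-ENODEV);	/* bad CPU and OOM now differ */
	return ERR_PTR(-ENOMEM);		/* pretend the allocation failed */
}

int main(void)
{
	void *page = alloc_read_page(0);

	if (IS_ERR(page))
		printf("failed: %ld\n", PTR_ERR(page));
	return 0;
}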
*/ if (info->read < PAGE_SIZE) @@ -6595,8 +6600,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, ref->ref = 1; ref->buffer = iter->trace_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); - if (!ref->page) { - ret = -ENOMEM; + if (IS_ERR(ref->page)) { + ret = PTR_ERR(ref->page); + ref->page = NULL; kfree(ref); break; } @@ -7594,6 +7600,7 @@ static int instance_rmdir(const char *name) } kfree(tr->topts); + free_cpumask_var(tr->tracing_cpumask); kfree(tr->name); kfree(tr); @@ -8109,6 +8116,7 @@ __init static int tracer_alloc_buffers(void) if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ + ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; @@ -8223,4 +8231,4 @@ __init static int clear_boot_tracer(void) } fs_initcall(tracer_init_tracefs); -late_initcall(clear_boot_tracer); +late_initcall_sync(clear_boot_tracer); diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 59a411ff60c7..181e139a8057 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call, if (err && set_str) append_filter_err(ps, filter); } + if (err && !set_str) { + free_event_filter(filter); + filter = NULL; + } create_filter_finish(ps); *filterp = filter; diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 0a689bbb78ef..305039b122fa 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a) if (!a) return; - if (!a->pages) { - kfree(a); - return; - } + if (!a->pages) + goto free; for (i = 0; i < a->n_pages; i++) { if (!a->pages[i]) break; free_page((unsigned long)a->pages[i]); } + + kfree(a->pages); + + free: + kfree(a); } struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts, diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 03e0b69bb5bf..b8e938c7273f 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -161,6 +161,7 @@ static void set_sample_period(void) * hardlockup detector generates a warning */ sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5); + watchdog_update_hrtimer_threshold(sample_period); } /* Commands for resetting the watchdog */ diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 54a427d1f344..cd0986b69cbc 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -70,6 +70,62 @@ void touch_nmi_watchdog(void) } EXPORT_SYMBOL(touch_nmi_watchdog); +#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP +static DEFINE_PER_CPU(ktime_t, last_timestamp); +static DEFINE_PER_CPU(unsigned int, nmi_rearmed); +static ktime_t watchdog_hrtimer_sample_threshold __read_mostly; + +void watchdog_update_hrtimer_threshold(u64 period) +{ + /* + * The hrtimer runs with a period of (watchdog_threshold * 2) / 5 + * + * So it runs effectively with 2.5 times the rate of the NMI + * watchdog. That means the hrtimer should fire 2-3 times before + * the NMI watchdog expires. The NMI watchdog on x86 is based on + * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles + * might run way faster than expected and the NMI fires in a + * smaller period than the one deduced from the nominal CPU + * frequency. Depending on the Turbo-Mode factor this might be fast + * enough to get the NMI period smaller than the hrtimer watchdog + * period and trigger false positives. 
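The ratios described in this watchdog comment are easy to sanity-check; the 10 s threshold below is only an assumed example value:

#include <stdio.h>

int main(void)
{
	unsigned long long nsec_per_sec = 1000000000ULL;
	unsigned long long watchdog_thresh = 10;	/* seconds, assumed */
	/* hrtimer period = (watchdog_thresh * 2) / 5: 2.5 firings per NMI period */
	unsigned long long sample_period = watchdog_thresh * 2 * nsec_per_sec / 5;
	/* minimum accepted NMI spacing = period * 2 = 4/5 of the threshold */
	unsigned long long sample_threshold = sample_period * 2;

	printf("hrtimer every %llu ns, NMIs accepted every %llu ns\n",
	       sample_period, sample_threshold);
	return 0;
}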
+ * + * The sample threshold is used to check in the NMI handler whether + * the minimum time between two NMI samples has elapsed. That + * prevents false positives. + * + * Set this to 4/5 of the actual watchdog threshold period so the + * hrtimer is guaranteed to fire at least once within the real + * watchdog threshold. + */ + watchdog_hrtimer_sample_threshold = period * 2; +} + +static bool watchdog_check_timestamp(void) +{ + ktime_t delta, now = ktime_get_mono_fast_ns(); + + delta = now - __this_cpu_read(last_timestamp); + if (delta < watchdog_hrtimer_sample_threshold) { + /* + * If ktime is jiffies based, a stalled timer would prevent + * jiffies from being incremented and the filter would look + * at a stale timestamp and never trigger. + */ + if (__this_cpu_inc_return(nmi_rearmed) < 10) + return false; + } + __this_cpu_write(nmi_rearmed, 0); + __this_cpu_write(last_timestamp, now); + return true; +} +#else +static inline bool watchdog_check_timestamp(void) +{ + return true; +} +#endif + static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, @@ -94,6 +150,9 @@ static void watchdog_overflow_callback(struct perf_event *event, return; } + if (!watchdog_check_timestamp()) + return; + /* check for a hardlockup * This is done by making sure our timer interrupt * is incrementing. The timer interrupt should have diff --git a/kernel/workqueue.c b/kernel/workqueue.c index c74bf39ef764..6effbcb7a3d6 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -3744,8 +3744,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, return -EINVAL; /* creating multiple pwqs breaks ordering guarantee */ - if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) - return -EINVAL; + if (!list_empty(&wq->pwqs)) { + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) + return -EINVAL; + + wq->flags &= ~__WQ_ORDERED; + } ctx = apply_wqattrs_prepare(wq, attrs); if (!ctx) @@ -3929,6 +3933,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt, struct workqueue_struct *wq; struct pool_workqueue *pwq; + /* + * Unbound && max_active == 1 used to imply ordered, which is no + * longer the case on NUMA machines due to per-node pools. While + * alloc_ordered_workqueue() is the right way to create an ordered + * workqueue, keep the previous behavior to avoid subtle breakages + * on NUMA. + */ + if ((flags & WQ_UNBOUND) && max_active == 1) + flags |= __WQ_ORDERED; + /* see the comment above the definition of WQ_POWER_EFFICIENT */ if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient) flags |= WQ_UNBOUND; @@ -4119,13 +4133,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) struct pool_workqueue *pwq; /* disallow meddling with max_active for ordered workqueues */ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return; max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); mutex_lock(&wq->mutex); + wq->flags &= ~__WQ_ORDERED; wq->saved_max_active = max_active; for_each_pwq(pwq, wq) @@ -5253,7 +5268,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq) * attributes breaks ordering guarantee. Disallow exposing ordered * workqueues. 
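A toy model of the flag split these workqueue hunks introduce: implicit ordering (__WQ_ORDERED inferred from unbound, max_active == 1) may be dropped when attributes change, while explicit ordering from alloc_ordered_workqueue() (__WQ_ORDERED_EXPLICIT) stays sticky. Flag values are illustrative:

#include <stdbool.h>

#define WQ_UNBOUND		(1 << 1)
#define __WQ_ORDERED		(1 << 17)
#define __WQ_ORDERED_EXPLICIT	(1 << 19)

static bool may_set_max_active(unsigned int *flags)
{
	if (*flags & __WQ_ORDERED_EXPLICIT)
		return false;		/* explicitly ordered: refuse */
	*flags &= ~__WQ_ORDERED;	/* implicit ordering is forfeited */
	return true;
}

static unsigned int alloc_flags(unsigned int flags, int max_active)
{
	if ((flags & WQ_UNBOUND) && max_active == 1)
		flags |= __WQ_ORDERED;	/* keep the old implicit guarantee */
	return flags;
}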
*/ - if (WARN_ON(wq->flags & __WQ_ORDERED)) + if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)) return -EINVAL; wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e4587ebe52c7..1f1cb51005de 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -344,6 +344,13 @@ config SECTION_MISMATCH_WARN_ONLY If unsure, say Y. +# +# Enables a timestamp based low pass filter to compensate for perf based +# hard lockup detection which runs too fast due to turbo modes. +# +config HARDLOCKUP_CHECK_TIMESTAMP + bool + # # Select this config option from the architecture Kconfig, if it # is preferred to always offer frame pointers as a config diff --git a/lib/cpumask.c b/lib/cpumask.c index 81dedaab36cc..4731a0895760 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } EXPORT_SYMBOL(cpumask_any_but); +/** + * cpumask_next_wrap - helper to implement for_each_cpu_wrap + * @n: the cpu prior to the place to search + * @mask: the cpumask pointer + * @start: the start point of the iteration + * @wrap: assume @n crossing @start terminates the iteration + * + * Returns >= nr_cpu_ids on completion + * + * Note: the @wrap argument is required for the start condition when + * we cannot assume @start is set in @mask. + */ +int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap) +{ + int next; + +again: + next = cpumask_next(n, mask); + + if (wrap && n < start && next >= start) { + return nr_cpumask_bits; + + } else if (next >= nr_cpumask_bits) { + wrap = true; + n = -1; + goto again; + } + + return next; +} +EXPORT_SYMBOL(cpumask_next_wrap); + /* These are not inline because of header tangles. */ #ifdef CONFIG_CPUMASK_OFFSTACK /** diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index a71cf1bdd4c9..2cc1f94e03a1 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p, if (val < (nr_cpu_ids * PROP_BATCH)) val = percpu_counter_sum(&pl->events); - __percpu_counter_add(&pl->events, + percpu_counter_add_batch(&pl->events, -val + (val >> (period-pl->period)), PROP_BATCH); } else percpu_counter_set(&pl->events, 0); @@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p, void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl) { fprop_reflect_period_percpu(p, pl); - __percpu_counter_add(&pl->events, 1, PROP_BATCH); + percpu_counter_add_batch(&pl->events, 1, PROP_BATCH); percpu_counter_add(&p->events, 1); } @@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p, return; } else fprop_reflect_period_percpu(p, pl); - __percpu_counter_add(&pl->events, 1, PROP_BATCH); + percpu_counter_add_batch(&pl->events, 1, PROP_BATCH); percpu_counter_add(&p->events, 1); } diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 9c21000df0b5..3bf4a9984f4c 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -72,7 +72,14 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount) } EXPORT_SYMBOL(percpu_counter_set); -void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) +/** + * This function is both preempt and irq safe. The former is due to explicit + * preemption disable. The latter is guaranteed by the fact that the slow path + * is explicitly protected by an irq-safe spinlock whereas the fast path uses + * this_cpu_add which is irq-safe by definition.
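+ * (Editor's aside, a minimal usage sketch under the new name; the
+ * counter and batch size here are illustrative, not from the patch:
+ *   static struct percpu_counter nr_events;
+ *   percpu_counter_init(&nr_events, 0, GFP_KERNEL);
+ *   percpu_counter_add_batch(&nr_events, 1, 32);
+ * and the last call is safe from either irq or process context.)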
Hence there is no need to muck + with irq state before calling this one. + */ +void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch) { s64 count; @@ -89,7 +96,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) } preempt_enable(); } -EXPORT_SYMBOL(__percpu_counter_add); +EXPORT_SYMBOL(percpu_counter_add_batch); /* * Add up all the per-cpu counts, return the result. This is a more accurate diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 595b757bef72..c03ccbc405a0 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -167,7 +167,7 @@ static void cma_debugfs_add_one(struct cma *cma, int idx) char name[16]; int u32s; - sprintf(name, "cma-%s", cma->name); + scnprintf(name, sizeof(name), "cma-%s", cma->name); tmp = debugfs_create_dir(name, cma_debugfs_root); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 88c6167f194d..f4d5f9d0f9b7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1575,8 +1575,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, get_page(page); spin_unlock(ptl); split_huge_page(page); - put_page(page); unlock_page(page); + put_page(page); goto out_unlocked; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3eedb187e549..cc289933f462 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4095,6 +4095,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr = *position; unsigned long remainder = *nr_pages; struct hstate *h = hstate_vma(vma); + int err = -EFAULT; while (vaddr < vma->vm_end && remainder) { pte_t *pte; @@ -4170,11 +4171,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, } ret = hugetlb_fault(mm, vma, vaddr, fault_flags); if (ret & VM_FAULT_ERROR) { - int err = vm_fault_to_errno(ret, flags); - - if (err) - return err; - + err = vm_fault_to_errno(ret, flags); remainder = 0; break; } @@ -4229,7 +4226,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, */ *position = vaddr; - return i ?
i : err; } #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE diff --git a/mm/internal.h b/mm/internal.h index 0e4f558412fb..9c8a2bfb975c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -498,6 +498,7 @@ extern struct workqueue_struct *mm_percpu_wq; #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH void try_to_unmap_flush(void); void try_to_unmap_flush_dirty(void); +void flush_tlb_batched_pending(struct mm_struct *mm); #else static inline void try_to_unmap_flush(void) { @@ -505,7 +506,9 @@ static inline void try_to_unmap_flush(void) static inline void try_to_unmap_flush_dirty(void) { } - +static inline void flush_tlb_batched_pending(struct mm_struct *mm) +{ +} #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ extern const struct trace_print_flags pageflag_names[]; diff --git a/mm/list_lru.c b/mm/list_lru.c index 234676e31edd..7a40fa2be858 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; + nlru->nr_items++; spin_unlock(&nlru->lock); return true; } @@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; + nlru->nr_items--; spin_unlock(&nlru->lock); return true; } @@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long list_lru_count_node(struct list_lru *lru, int nid) { - long count = 0; - int memcg_idx; + struct list_lru_node *nlru; - count += __list_lru_count_one(lru, nid, -1); - if (list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) - count += __list_lru_count_one(lru, nid, memcg_idx); - } - return count; + nlru = &lru->node[nid]; + return nlru->nr_items; } EXPORT_SYMBOL_GPL(list_lru_count_node); @@ -226,6 +223,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx, assert_spin_locked(&nlru->lock); case LRU_REMOVED: isolated++; + nlru->nr_items--; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to diff --git a/mm/madvise.c b/mm/madvise.c index 25b78ee4fc2c..fc6bfbe19a16 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -320,6 +320,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, tlb_remove_check_page_size_change(tlb, PAGE_SIZE); orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; @@ -367,8 +368,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, pte_offset_map_lock(mm, pmd, addr, &ptl); goto out; } - put_page(page); unlock_page(page); + put_page(page); pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte--; addr -= PAGE_SIZE; diff --git a/mm/memblock.c b/mm/memblock.c index 7b8a5db76a2f..43d0919e29f3 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -288,31 +288,27 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u } #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK - -phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info( - phys_addr_t *addr) -{ - if (memblock.reserved.regions == memblock_reserved_init_regions) - return 0; - - *addr = __pa(memblock.reserved.regions); - - return PAGE_ALIGN(sizeof(struct memblock_region) * - memblock.reserved.max); -} - -phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info( - phys_addr_t *addr) +/** + * Discard memory and reserved arrays if they were allocated + */ +void __init 
memblock_discard(void) { - if (memblock.memory.regions == memblock_memory_init_regions) - return 0; + phys_addr_t addr, size; - *addr = __pa(memblock.memory.regions); + if (memblock.reserved.regions != memblock_reserved_init_regions) { + addr = __pa(memblock.reserved.regions); + size = PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.reserved.max); + __memblock_free_late(addr, size); + } - return PAGE_ALIGN(sizeof(struct memblock_region) * - memblock.memory.max); + if (memblock.memory.regions != memblock_memory_init_regions) { + addr = __pa(memblock.memory.regions); + size = PAGE_ALIGN(sizeof(struct memblock_region) * + memblock.memory.max); + __memblock_free_late(addr, size); + } } - #endif /** diff --git a/mm/memory.c b/mm/memory.c index bb11c474857e..9e50ffcf9639 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1197,6 +1197,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, init_rss_vec(rss); start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); pte = start_pte; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { pte_t ptent = *pte; @@ -3881,8 +3882,18 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, * further. */ if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR) - && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) + && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) { + + /* + * We are going to enforce SIGBUS but the PF path might have + * dropped the mmap_sem already so take it again so that + * we do not break expectations of all arch specific PF paths + * and g-u-p + */ + if (ret & VM_FAULT_RETRY) + down_read(&vma->vm_mm->mmap_sem); ret = VM_FAULT_SIGBUS; + } return ret; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 37d0b334bfe9..e0157546e6b5 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -931,11 +931,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask, *policy |= (pol->flags & MPOL_MODE_FLAGS); } - if (vma) { - up_read(¤t->mm->mmap_sem); - vma = NULL; - } - err = 0; if (nmask) { if (mpol_store_user_nodemask(pol)) { diff --git a/mm/migrate.c b/mm/migrate.c index 89a0a1707f4c..2586d5ab9b99 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -41,6 +41,7 @@ #include <linux/page_idle.h> #include <linux/page_owner.h> #include <linux/sched/mm.h> +#include <linux/ptrace.h> #include <asm/tlbflush.h> @@ -1649,7 +1650,6 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, const int __user *, nodes, int __user *, status, int, flags) { - const struct cred *cred = current_cred(), *tcred; struct task_struct *task; struct mm_struct *mm; int err; @@ -1673,14 +1673,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, /* * Check if this process has the right to modify the specified - * process. The right exists if the process has administrative - * capabilities, superuser privileges or the same - * userid as the target process. + * process. Use the regular "ptrace_may_access()" checks. */ - tcred = __task_cred(task); - if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) && - !capable(CAP_SYS_NICE)) { + if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out; diff --git a/mm/mmap.c b/mm/mmap.c index a5e3dcd75e79..cc2fc8ae42a9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2232,7 +2232,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Guard against exceeding limits of the address space. 
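 * (Editor's illustration with made-up numbers: if TASK_SIZE were
 * 0xbfff0800, the page-masked address 0xbfff0000 passed the old
 * "address >= TASK_SIZE" test and "address += PAGE_SIZE" then stepped
 * past the limit; comparing against TASK_SIZE & PAGE_MASK, as below,
 * rejects that final partial page.)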
*/ address &= PAGE_MASK; - if (address >= TASK_SIZE) + if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; diff --git a/mm/mprotect.c b/mm/mprotect.c index 8edd0d576254..f42749e6bf4e 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -66,6 +66,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, atomic_read(&vma->vm_mm->mm_users) == 1) target_node = numa_node_id(); + flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { oldpte = *pte; diff --git a/mm/mremap.c b/mm/mremap.c index cd8a1b199ef9..3f23715d3c69 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -152,6 +152,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, new_ptl = pte_lockptr(mm, new_pmd); if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); + flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, @@ -428,6 +429,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, static unsigned long mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len, bool *locked, struct vm_userfaultfd_ctx *uf, + struct list_head *uf_unmap_early, struct list_head *uf_unmap) { struct mm_struct *mm = current->mm; @@ -446,7 +448,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len, if (addr + old_len > new_addr && new_addr + new_len > addr) goto out; - ret = do_munmap(mm, new_addr, new_len, NULL); + ret = do_munmap(mm, new_addr, new_len, uf_unmap_early); if (ret) goto out; @@ -514,6 +516,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long charged = 0; bool locked = false; struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX; + LIST_HEAD(uf_unmap_early); LIST_HEAD(uf_unmap); if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) @@ -541,7 +544,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, if (flags & MREMAP_FIXED) { ret = mremap_to(addr, old_len, new_addr, new_len, - &locked, &uf, &uf_unmap); + &locked, &uf, &uf_unmap_early, &uf_unmap); goto out; } @@ -621,6 +624,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, up_write(¤t->mm->mmap_sem); if (locked && new_len > old_len) mm_populate(new_addr + old_len, new_len - old_len); + userfaultfd_unmap_complete(mm, &uf_unmap_early); mremap_userfaultfd_complete(&uf, addr, new_addr, old_len); userfaultfd_unmap_complete(mm, &uf_unmap); return ret; diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 487dad610731..ab998125f04d 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -146,22 +146,6 @@ static unsigned long __init free_low_memory_core_early(void) NULL) count += __free_memory_core(start, end); -#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK - { - phys_addr_t size; - - /* Free memblock.reserved array if it was allocated */ - size = get_allocated_memblock_reserved_regions_info(&start); - if (size) - count += __free_memory_core(start, start + size); - - /* Free memblock.memory array if it was allocated */ - size = get_allocated_memblock_memory_regions_info(&start); - if (size) - count += __free_memory_core(start, start + size); - } -#endif - return count; } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 143c1c25d680..b7451891959a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb) { struct wb_domain *cgdom; - __inc_wb_stat(wb, WB_WRITTEN); + inc_wb_stat(wb, WB_WRITTEN); 
wb_domain_writeout_inc(&global_wb_domain, &wb->completions, wb->bdi->max_prop_frac); @@ -2437,8 +2437,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) __inc_node_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING); __inc_node_page_state(page, NR_DIRTIED); - __inc_wb_stat(wb, WB_RECLAIMABLE); - __inc_wb_stat(wb, WB_DIRTIED); + inc_wb_stat(wb, WB_RECLAIMABLE); + inc_wb_stat(wb, WB_DIRTIED); task_io_account_write(PAGE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); @@ -2745,7 +2745,7 @@ int test_clear_page_writeback(struct page *page) if (bdi_cap_account_writeback(bdi)) { struct bdi_writeback *wb = inode_to_wb(inode); - __dec_wb_stat(wb, WB_WRITEBACK); + dec_wb_stat(wb, WB_WRITEBACK); __wb_writeout_inc(wb); } } @@ -2791,7 +2791,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write) page_index(page), PAGECACHE_TAG_WRITEBACK); if (bdi_cap_account_writeback(bdi)) - __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); + inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); /* * We can come through here when swapping anonymous diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2302f250d6b1..f553b3a6eca8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -66,6 +66,7 @@ #include <linux/kthread.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> +#include <linux/nmi.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -1582,6 +1583,10 @@ void __init page_alloc_init_late(void) /* Reinit limits that are based on free pages after the kernel is up */ files_maxfiles_init(); #endif +#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK + /* Discard memblock private memory */ + memblock_discard(); +#endif for_each_populated_zone(zone) set_zone_contiguous(zone); @@ -2491,9 +2496,14 @@ void drain_all_pages(struct zone *zone) #ifdef CONFIG_HIBERNATION +/* + * Touch the watchdog for every WD_PAGE_COUNT pages. + */ +#define WD_PAGE_COUNT (128*1024) + void mark_free_pages(struct zone *zone) { - unsigned long pfn, max_zone_pfn; + unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; unsigned long flags; unsigned int order, t; struct page *page; @@ -2508,6 +2518,11 @@ void mark_free_pages(struct zone *zone) if (pfn_valid(pfn)) { page = pfn_to_page(pfn); + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + if (page_zone(page) != zone) continue; @@ -2521,8 +2536,13 @@ void mark_free_pages(struct zone *zone) unsigned long i; pfn = page_to_pfn(page); - for (i = 0; i < (1UL << order); i++) + for (i = 0; i < (1UL << order); i++) { + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } swsusp_set_page_free(pfn_to_page(pfn + i)); + } } } spin_unlock_irqrestore(&zone->lock, flags); @@ -7567,7 +7587,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, /* Make sure the range is really isolated. */ if (test_pages_isolated(outer_start, end, false)) { - pr_info("%s: [%lx, %lx) PFNs busy\n", + pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", __func__, outer_start, end); ret = -EBUSY; goto done; diff --git a/mm/rmap.c b/mm/rmap.c index d405f0e0ee96..9835d19fe143 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); tlb_ubc->flush_required = true; + /* + * Ensure compiler does not re-order the setting of tlb_flush_batched + * before the PTE is cleared. 
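+ *
+ * (Editor's sketch of the window this guards, paraphrasing the
+ * flush_tlb_batched_pending() comment further down: reclaim clears a
+ * PTE and defers the TLB flush; if a parallel mprotect or munmap saw
+ * the cleared PTE but a stale tlb_flush_batched value, it would skip
+ * its flush and leave a stale TLB entry accessible.)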
+ */ + barrier(); + mm->tlb_flush_batched = true; + /* * If the PTE was dirty then it's best to assume it's writable. The * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() @@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) return should_defer; } + +/* + * Reclaim unmaps pages under the PTL but does not flush the TLB prior to + * releasing the PTL if TLB flushes are batched. It's possible for a parallel + * operation such as mprotect or munmap to race between reclaim unmapping + * the page and flushing the page. If this race occurs, it potentially allows + * access to data via a stale TLB entry. Tracking all mm's that have TLB + * batching in flight would be expensive during reclaim so instead track + * whether TLB batching occurred in the past and if so then do a flush here + * if required. This will cost one additional flush per reclaim cycle paid + * by the first operation at risk such as mprotect and munmap. + * + * This must be called under the PTL so that an access to tlb_flush_batched + * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise + * via the PTL. + */ +void flush_tlb_batched_pending(struct mm_struct *mm) +{ + if (mm->tlb_flush_batched) { + flush_tlb_mm(mm); + + /* + * Do not allow the compiler to re-order the clearing of + * tlb_flush_batched before the tlb is flushed. + */ + barrier(); + mm->tlb_flush_batched = false; + } +} #else static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) { diff --git a/mm/shmem.c b/mm/shmem.c index e67d6ba4e98e..0474c7a73cfa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1021,7 +1021,11 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) */ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { spin_lock(&sbinfo->shrinklist_lock); - if (list_empty(&info->shrinklist)) { + /* + * _careful to defend against unlocked access to + * ->shrink_list in shmem_unused_huge_shrink() + */ + if (list_empty_careful(&info->shrinklist)) { list_add_tail(&info->shrinklist, &sbinfo->shrinklist); sbinfo->shrinklist_len++; @@ -1817,7 +1821,11 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, * to shrink under memory pressure. */ spin_lock(&sbinfo->shrinklist_lock); - if (list_empty(&info->shrinklist)) { + /* + * _careful to defend against unlocked access to + * ->shrink_list in shmem_unused_huge_shrink() + */ + if (list_empty_careful(&info->shrinklist)) { list_add_tail(&info->shrinklist, &sbinfo->shrinklist); sbinfo->shrinklist_len++; @@ -3956,7 +3964,7 @@ int __init shmem_init(void) } #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE - if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) + if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = 0; /* just in case it was patched */ @@ -4017,7 +4025,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, return -EINVAL; shmem_huge = huge; - if (shmem_huge < SHMEM_HUGE_DENY) + if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; return count; } diff --git a/mm/slub.c b/mm/slub.c index 8addc535bcdc..a0f3c56611c6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5637,13 +5637,14 @@ static void sysfs_slab_remove_workfn(struct work_struct *work) * A cache is never shut down before deactivation is * complete, so no need to worry about synchronization.
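 * (Editor's note on the hunk below: the early "return" skipped the
 * final kobject_put(), leaking the reference that pins s->kobj;
 * funnelling every path through the new "out:" label makes the put
 * unconditional.)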
*/ - return; + goto out; #ifdef CONFIG_MEMCG kset_unregister(s->memcg_kset); #endif kobject_uevent(&s->kobj, KOBJ_REMOVE); kobject_del(&s->kobj); +out: kobject_put(&s->kobj); } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ecc97f74ab18..104eb720ba43 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1669,7 +1669,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, struct page **pages; unsigned int nr_pages, array_size, i; const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; - const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN; + const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN; + const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ? + 0 : + __GFP_HIGHMEM; nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; array_size = (nr_pages * sizeof(struct page *)); @@ -1677,7 +1680,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, area->nr_pages = nr_pages; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { - pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, + pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask, PAGE_KERNEL, node, area->caller); } else { pages = kmalloc_node(array_size, nested_gfp, node); @@ -1698,9 +1701,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, } if (node == NUMA_NO_NODE) - page = alloc_page(alloc_mask); + page = alloc_page(alloc_mask|highmem_mask); else - page = alloc_pages_node(node, alloc_mask, 0); + page = alloc_pages_node(node, alloc_mask|highmem_mask, 0); if (unlikely(!page)) { /* Successfully allocated i pages, free them in __vunmap() */ @@ -1708,7 +1711,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, goto fail; } area->pages[i] = page; - if (gfpflags_allow_blocking(gfp_mask)) + if (gfpflags_allow_blocking(gfp_mask|highmem_mask)) cond_resched(); } diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index fbf251fef70f..4d6b94d7ce5f 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -484,16 +484,16 @@ static int bnep_session(void *arg) struct net_device *dev = s->dev; struct sock *sk = s->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG(""); set_user_nice(current, -15); - init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); while (1) { - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&s->terminate)) break; @@ -515,9 +515,8 @@ static int bnep_session(void *arg) break; netif_wake_queue(dev); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); /* Cleanup session */ @@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req) s = __bnep_get_session(req->dst); if (s) { atomic_inc(&s->terminate); - wake_up_process(s->task); + wake_up_interruptible(sk_sleep(s->sock->sk)); } else err = -ENOENT; diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 9e59b6654126..1152ce34dad4 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -280,16 +280,16 @@ static int cmtp_session(void *arg) struct cmtp_session *session = arg; struct sock *sk = session->sock->sk; struct sk_buff *skb; - wait_queue_t wait; + DEFINE_WAIT_FUNC(wait, woken_wake_function); BT_DBG("session %p", session); set_user_nice(current, -15); - 
init_waitqueue_entry(&wait, current); add_wait_queue(sk_sleep(sk), &wait); while (1) { - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&session->terminate)) break; @@ -306,9 +306,8 @@ static int cmtp_session(void *arg) cmtp_process_transmit(session); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); down_write(&cmtp_session_sem); @@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) err = cmtp_attach_device(session); if (err < 0) { atomic_inc(&session->terminate); - wake_up_process(session->task); + wake_up_interruptible(sk_sleep(session->sock->sk)); up_write(&cmtp_session_sem); return err; } @@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) /* Stop session thread */ atomic_inc(&session->terminate); - wake_up_process(session->task); + + /* Ensure session->terminate is updated */ + smp_mb__after_atomic(); + + wake_up_interruptible(sk_sleep(session->sock->sk)); } else err = -ENOENT; diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 0bec4588c3c8..1fc076420d1e 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -36,6 +36,7 @@ #define VERSION "1.2" static DECLARE_RWSEM(hidp_session_sem); +static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); static LIST_HEAD(hidp_session_list); static unsigned char hidp_keycode[256] = { @@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session) * Wake up session thread and notify it to stop. This is asynchronous and * returns immediately. Call this whenever a runtime error occurs and you want * the session to stop. - * Note: wake_up_process() performs any necessary memory-barriers for us. + * Note: wake_up_interruptible() performs any necessary memory-barriers for us. */ static void hidp_session_terminate(struct hidp_session *session) { atomic_inc(&session->terminate); - wake_up_process(session->task); + wake_up_interruptible(&hidp_session_wq); } /* @@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session) struct sock *ctrl_sk = session->ctrl_sock->sk; struct sock *intr_sk = session->intr_sock->sk; struct sk_buff *skb; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + add_wait_queue(&hidp_session_wq, &wait); for (;;) { /* * This thread can be woken up two ways: @@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session) * session->terminate flag and wakes this thread up. * - Via modifying the socket state of ctrl/intr_sock. This * thread is woken up by ->sk_state_changed(). - * - * Note: set_current_state() performs any necessary - * memory-barriers for us. 
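+ * (Editor's aside: the pattern substituted throughout bnep, cmtp and
+ * hidp in this series is roughly
+ *   DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ *   add_wait_queue(wq, &wait);
+ *   while (!condition)
+ *           wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ *   remove_wait_queue(wq, &wait);
+ * where wait_woken()/woken_wake_function close the race between
+ * testing the condition and going to sleep that the old
+ * set_current_state() + schedule() code handled by hand.)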
*/ - set_current_state(TASK_INTERRUPTIBLE); + /* Ensure session->terminate is updated */ + smp_mb__before_atomic(); if (atomic_read(&session->terminate)) break; @@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session) hidp_process_transmit(session, &session->ctrl_transmit, session->ctrl_sock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } + remove_wait_queue(&hidp_session_wq, &wait); atomic_inc(&session->terminate); - set_current_state(TASK_RUNNING); + + /* Ensure session->terminate is updated */ + smp_mb__after_atomic(); +} + +static int hidp_session_wake_function(wait_queue_t *wait, + unsigned int mode, + int sync, void *key) +{ + wake_up_interruptible(&hidp_session_wq); + return false; } /* @@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session) static int hidp_session_thread(void *arg) { struct hidp_session *session = arg; - wait_queue_t ctrl_wait, intr_wait; + DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); + DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); BT_DBG("session %p", session); @@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg) set_user_nice(current, -15); hidp_set_timer(session); - init_waitqueue_entry(&ctrl_wait, current); - init_waitqueue_entry(&intr_wait, current); add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); /* This memory barrier is paired with wq_has_sleeper(). See diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 14585edc9439..a0ef89772c36 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -23,6 +23,7 @@ #include <linux/debugfs.h> #include <linux/scatterlist.h> #include <linux/crypto.h> +#include <crypto/algapi.h> #include <crypto/b128ops.h> #include <crypto/hash.h> @@ -523,7 +524,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], if (err) return false; - return !memcmp(bdaddr->b, hash, 3); + return !crypto_memneq(bdaddr->b, hash, 3); } int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) @@ -579,7 +580,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) /* This is unlikely, but we need to check that * we didn't accidentially generate a debug key. */ - if (memcmp(smp->local_sk, debug_sk, 32)) + if (crypto_memneq(smp->local_sk, debug_sk, 32)) break; } smp->debug_key = false; @@ -993,7 +994,7 @@ static u8 smp_random(struct smp_chan *smp) if (ret) return SMP_UNSPECIFIED; - if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) { + if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) { BT_ERR("Pairing failed (confirmation values mismatch)"); return SMP_CONFIRM_FAILED; } @@ -1512,7 +1513,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) smp->rrnd, r, cfm)) return SMP_UNSPECIFIED; - if (memcmp(smp->pcnf, cfm, 16)) + if (crypto_memneq(smp->pcnf, cfm, 16)) return SMP_CONFIRM_FAILED; smp->passkey_round++; @@ -1908,7 +1909,7 @@ static u8 sc_send_public_key(struct smp_chan *smp) /* This is unlikely, but we need to check that * we didn't accidentially generate a debug key. 
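 * (Editor's note on the memcmp() to crypto_memneq() conversions in
 * this file: memcmp() returns at the first differing byte, so its
 * running time can leak how much of a secret matched, while
 * crypto_memneq() inspects every byte and returns zero only on
 * equality, making !crypto_memneq(a, b, n) a constant-time
 * equivalent of !memcmp(a, b, n).)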
*/ - if (memcmp(smp->local_sk, debug_sk, 32)) + if (crypto_memneq(smp->local_sk, debug_sk, 32)) break; } } @@ -2176,7 +2177,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) if (err) return SMP_UNSPECIFIED; - if (memcmp(smp->pcnf, cfm, 16)) + if (crypto_memneq(smp->pcnf, cfm, 16)) return SMP_CONFIRM_FAILED; } else { smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), @@ -2660,7 +2661,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) if (err) return SMP_UNSPECIFIED; - if (memcmp(cfm.confirm_val, smp->pcnf, 16)) + if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16)) return SMP_CONFIRM_FAILED; } @@ -2693,7 +2694,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) else hcon->pending_sec_level = BT_SECURITY_FIPS; - if (!memcmp(debug_pk, smp->remote_pk, 64)) + if (!crypto_memneq(debug_pk, smp->remote_pk, 64)) set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); if (smp->method == DSP_PASSKEY) { @@ -2792,7 +2793,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) if (err) return SMP_UNSPECIFIED; - if (memcmp(check->e, e, 16)) + if (crypto_memneq(check->e, e, 16)) return SMP_DHKEY_CHECK_FAILED; if (!hcon->out) { @@ -3506,10 +3507,10 @@ static int __init test_debug_key(void) if (!generate_ecdh_keys(pk, sk)) return -EINVAL; - if (memcmp(sk, debug_sk, 32)) + if (crypto_memneq(sk, debug_sk, 32)) return -EINVAL; - if (memcmp(pk, debug_pk, 64)) + if (crypto_memneq(pk, debug_pk, 64)) return -EINVAL; return 0; @@ -3529,7 +3530,7 @@ static int __init test_ah(struct crypto_cipher *tfm_aes) if (err) return err; - if (memcmp(res, exp, 3)) + if (crypto_memneq(res, exp, 3)) return -EINVAL; return 0; @@ -3559,7 +3560,7 @@ static int __init test_c1(struct crypto_cipher *tfm_aes) if (err) return err; - if (memcmp(res, exp, 16)) + if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; @@ -3584,7 +3585,7 @@ static int __init test_s1(struct crypto_cipher *tfm_aes) if (err) return err; - if (memcmp(res, exp, 16)) + if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; @@ -3616,7 +3617,7 @@ static int __init test_f4(struct crypto_shash *tfm_cmac) if (err) return err; - if (memcmp(res, exp, 16)) + if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; @@ -3650,10 +3651,10 @@ static int __init test_f5(struct crypto_shash *tfm_cmac) if (err) return err; - if (memcmp(mackey, exp_mackey, 16)) + if (crypto_memneq(mackey, exp_mackey, 16)) return -EINVAL; - if (memcmp(ltk, exp_ltk, 16)) + if (crypto_memneq(ltk, exp_ltk, 16)) return -EINVAL; return 0; @@ -3686,7 +3687,7 @@ static int __init test_f6(struct crypto_shash *tfm_cmac) if (err) return err; - if (memcmp(res, exp, 16)) + if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; @@ -3740,7 +3741,7 @@ static int __init test_h6(struct crypto_shash *tfm_cmac) if (err) return err; - if (memcmp(res, exp, 16)) + if (crypto_memneq(res, exp, 16)) return -EINVAL; return 0; diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index b0845480a3ae..c1030f852b3c 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p, __mdb_entry_to_br_ip(entry, &complete_info->ip); mdb.obj.complete_priv = complete_info; mdb.obj.complete = br_mdb_complete; - switchdev_port_obj_add(port_dev, &mdb.obj); + if (switchdev_port_obj_add(port_dev, &mdb.obj)) + kfree(complete_info); } } else if (port_dev && type == RTM_DELMDB) { switchdev_port_obj_del(port_dev, &mdb.obj); diff 
--git a/net/core/dev.c b/net/core/dev.c index 416137c64bf8..528edc68a64a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2670,7 +2670,7 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) return skb->ip_summed != CHECKSUM_PARTIAL && - skb->ip_summed != CHECKSUM_NONE; + skb->ip_summed != CHECKSUM_UNNECESSARY; return skb->ip_summed == CHECKSUM_NONE; } @@ -7751,7 +7751,7 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, { #if BITS_PER_LONG == 64 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats)); - memcpy(stats64, netdev_stats, sizeof(*stats64)); + memcpy(stats64, netdev_stats, sizeof(*netdev_stats)); /* zero out counters that only exist in rtnl_link_stats64 */ memset((char *)stats64 + sizeof(*netdev_stats), 0, sizeof(*stats64) - sizeof(*netdev_stats)); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 27fad31784a8..18f9cb9aa87d 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg) if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) return -EFAULT; + ifr.ifr_name[IFNAMSIZ-1] = 0; error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex); if (error) @@ -423,6 +424,8 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg) if (copy_from_user(&iwr, arg, sizeof(iwr))) return -EFAULT; + iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0; + return wext_handle_ioctl(net, &iwr, cmd, arg); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 467a2f4510a7..52bfeb60c886 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1977,7 +1977,8 @@ static int do_setlink(const struct sk_buff *skb, struct sockaddr *sa; int len; - len = sizeof(sa_family_t) + dev->addr_len; + len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len, + sizeof(*sa)); sa = kmalloc(len, GFP_KERNEL); if (!sa) { err = -ENOMEM; @@ -4165,6 +4166,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi switch (event) { case NETDEV_REBOOT: + case NETDEV_CHANGEADDR: case NETDEV_CHANGENAME: case NETDEV_FEAT_CHANGE: case NETDEV_BONDING_FAILOVER: diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 1704948e6a12..f227f002c73d 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk) * singleton values (which always leads to failure). * These settings can still (later) be overridden via sockopts. 
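 * (Editor's note on the hunk below: with the two allocations checked
 * by a single "||", a failure of the second call leaked the tx.val
 * buffer from the first; splitting the checks lets the error path
 * kfree(tx.val) before returning -ENOBUFS.)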
*/ - if (ccid_get_builtin_ccids(&tx.val, &tx.len) || - ccid_get_builtin_ccids(&rx.val, &rx.len)) + if (ccid_get_builtin_ccids(&tx.val, &tx.len)) return -ENOBUFS; + if (ccid_get_builtin_ccids(&rx.val, &rx.len)) { + kfree(tx.val); + return -ENOBUFS; + } if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) || !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len)) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index f75482bdee9a..97368f229876 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -631,6 +631,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); + reqsk_put(req); return 0; drop_and_free: diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 992621172220..cf3e40df4765 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); + reqsk_put(req); return 0; drop_and_free: diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9fe25bf63296..b68168fcc06a 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -24,6 +24,7 @@ #include <net/checksum.h> #include <net/inet_sock.h> +#include <net/inet_common.h> #include <net/sock.h> #include <net/xfrm.h> @@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type) EXPORT_SYMBOL_GPL(dccp_packet_name); +static void dccp_sk_destruct(struct sock *sk) +{ + struct dccp_sock *dp = dccp_sk(sk); + + ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); + dp->dccps_hc_tx_ccid = NULL; + inet_sock_destruct(sk); +} + int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) { struct dccp_sock *dp = dccp_sk(sk); @@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized) icsk->icsk_syn_retries = sysctl_dccp_request_retries; sk->sk_state = DCCP_CLOSED; sk->sk_write_space = dccp_write_space; + sk->sk_destruct = dccp_sk_destruct; icsk->icsk_sync_mss = dccp_sync_mss; dp->dccps_mss_cache = 536; dp->dccps_rate_last = jiffies; @@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); - /* - * DCCP doesn't use sk_write_queue, just sk_send_head - * for retransmissions - */ + __skb_queue_purge(&sk->sk_write_queue); if (sk->sk_send_head != NULL) { kfree_skb(sk->sk_send_head); sk->sk_send_head = NULL; @@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk) dp->dccps_hc_rx_ackvec = NULL; } ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk); - ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); - dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL; + dp->dccps_hc_rx_ccid = NULL; /* clean up feature negotiation state */ dccp_feat_list_purge(&dp->dccps_featneg); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 58925b6597de..ab8ebd440423 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1731,6 +1731,13 @@ static __net_init int inet_init_net(struct net *net) net->ipv4.sysctl_ip_prot_sock = PROT_SOCK; #endif + /* Some igmp sysctl, whose values are always used */ + net->ipv4.sysctl_igmp_max_memberships = 20; + net->ipv4.sysctl_igmp_max_msf = 10; + /* IGMP reports for link-local multicast groups are enabled by default */ + net->ipv4.sysctl_igmp_llm_reports = 1; + net->ipv4.sysctl_igmp_qrv = 2; + return 0; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 83e3ed258467..3acc8261477c 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1327,13 +1327,14 @@ static struct pernet_operations fib_net_ops = 
{ void __init ip_fib_init(void) { - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); + fib_trie_init(); register_pernet_subsys(&fib_net_ops); + register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); - fib_trie_init(); + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ad9ad4aab5da..ac9a8fbbacfd 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1033,15 +1033,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg) fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL); if (!fi) goto failure; - fib_info_cnt++; if (cfg->fc_mx) { fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL); - if (!fi->fib_metrics) - goto failure; + if (unlikely(!fi->fib_metrics)) { + kfree(fi); + return ERR_PTR(err); + } atomic_set(&fi->fib_metrics->refcnt, 1); - } else + } else { fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; - + } + fib_info_cnt++; fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; @@ -1372,7 +1374,7 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh, return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, &info.info); case FIB_EVENT_NH_DEL: - if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && + if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && fib_nh->nh_flags & RTNH_F_LINKDOWN) || (fib_nh->nh_flags & RTNH_F_DEAD)) return call_fib_notifiers(dev_net(fib_nh->nh_dev), diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index ec9a396fa466..abdbe79ee175 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1007,10 +1007,18 @@ int igmp_rcv(struct sk_buff *skb) { /* This basically follows the spec line by line -- see RFC1112 */ struct igmphdr *ih; - struct in_device *in_dev = __in_dev_get_rcu(skb->dev); + struct net_device *dev = skb->dev; + struct in_device *in_dev; int len = skb->len; bool dropped = true; + if (netif_is_l3_master(dev)) { + dev = dev_get_by_index_rcu(dev_net(dev), IPCB(skb)->iif); + if (!dev) + goto drop; + } + + in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto drop; @@ -2974,12 +2982,6 @@ static int __net_init igmp_net_init(struct net *net) goto out_sock; } - /* Sysctl initialization */ - net->ipv4.sysctl_igmp_max_memberships = 20; - net->ipv4.sysctl_igmp_max_msf = 10; - /* IGMP reports for link-local multicast groups are enabled by default */ - net->ipv4.sysctl_igmp_llm_reports = 1; - net->ipv4.sysctl_igmp_qrv = 2; return 0; out_sock: diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 532b36e9ce2a..e5948c0c9759 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -964,11 +964,12 @@ static int __ip_append_data(struct sock *sk, csummode = CHECKSUM_PARTIAL; cork->length += length; - if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || - (skb && skb_is_gso(skb))) && + if ((skb && skb_is_gso(skb)) || + (((length + (skb ? 
skb->len : fragheaderlen)) > mtu) && + (skb_queue_len(queue) <= 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && - (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) { err = ip_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, maxfraglen, flags); @@ -1287,6 +1288,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, return -EINVAL; if ((size + skb->len > mtu) && + (skb_queue_len(&sk->sk_write_queue) == 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO)) { if (skb->ip_summed != CHECKSUM_PARTIAL) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6883b3d4ba8f..22ba873546c3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1268,7 +1268,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst) if (mtu) return mtu; - mtu = dst->dev->mtu; + mtu = READ_ONCE(dst->dev->mtu); if (unlikely(dst_metric_locked(dst, RTAX_MTU))) { if (rt->rt_uses_gateway && mtu > 576) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 0257d965f111..4a97fe20f59e 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -332,6 +332,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; treq->ts_off = 0; + treq->txhash = net_tx_rndhash(); req->mss = mss; ireq->ir_num = ntohs(th->dest); ireq->ir_rmt_port = th->source; diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index b89bce4c721e..96c95c8d981e 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -113,7 +113,8 @@ struct bbr { cwnd_gain:10, /* current gain for setting cwnd */ full_bw_cnt:3, /* number of rounds without large bw gains */ cycle_idx:3, /* current index in pacing_gain cycle array */ - unused_b:6; + has_seen_rtt:1, /* have we seen an RTT sample yet? */ + unused_b:5; u32 prior_cwnd; /* prior cwnd upon entering loss recovery */ u32 full_bw; /* recent bw, to estimate if pipe is full */ }; @@ -212,6 +213,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) return rate >> BW_SCALE; } +/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */ +static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain) +{ + u64 rate = bw; + + rate = bbr_rate_bytes_per_sec(sk, rate, gain); + rate = min_t(u64, rate, sk->sk_max_pacing_rate); + return rate; +} + +/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */ +static void bbr_init_pacing_rate_from_rtt(struct sock *sk) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct bbr *bbr = inet_csk_ca(sk); + u64 bw; + u32 rtt_us; + + if (tp->srtt_us) { /* any RTT sample yet? */ + rtt_us = max(tp->srtt_us >> 3, 1U); + bbr->has_seen_rtt = 1; + } else { /* no RTT sample yet */ + rtt_us = USEC_PER_MSEC; /* use nominal default RTT */ + } + bw = (u64)tp->snd_cwnd * BW_UNIT; + do_div(bw, rtt_us); + sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain); +} + /* Pace using current bw estimate and a gain factor. 
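 * (Editor's worked example for bbr_init_pacing_rate_from_rtt() above,
 * using illustrative numbers: snd_cwnd = 10 and srtt = 10ms give
 * bw = 10 * BW_UNIT / 10000us, about 1000 packets/sec, which
 * bbr_bw_to_pacing_rate() scales by the roughly 2.89x bbr_high_gain
 * to a pacing rate near 4 MB/s at a 1448-byte MSS.)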
In order to help drive the * network toward lower queues while maintaining high utilization and low * latency, the average pacing rate aims to be slightly (~1%) lower than the @@ -221,12 +251,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) */ static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain) { + struct tcp_sock *tp = tcp_sk(sk); struct bbr *bbr = inet_csk_ca(sk); - u64 rate = bw; + u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain); - rate = bbr_rate_bytes_per_sec(sk, rate, gain); - rate = min_t(u64, rate, sk->sk_max_pacing_rate); - if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate) + if (unlikely(!bbr->has_seen_rtt && tp->srtt_us)) + bbr_init_pacing_rate_from_rtt(sk); + if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate) sk->sk_pacing_rate = rate; } @@ -799,7 +830,6 @@ static void bbr_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct bbr *bbr = inet_csk_ca(sk); - u64 bw; bbr->prior_cwnd = 0; bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */ @@ -815,11 +845,8 @@ static void bbr_init(struct sock *sk) minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */ - /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */ - bw = (u64)tp->snd_cwnd * BW_UNIT; - do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC); - sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */ - bbr_set_pacing_rate(sk, bw, bbr_high_gain); + bbr->has_seen_rtt = 0; + bbr_init_pacing_rate_from_rtt(sk); bbr->restore_cwnd = 0; bbr->round_start = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 174d4376baa5..fbaac4423a99 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2517,8 +2517,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk) return; /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || - (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { + if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && + (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } @@ -3007,8 +3007,7 @@ void tcp_rearm_rto(struct sock *sk) /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ - if (delta > 0) - rto = delta; + rto = max(delta, 1); } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4858e190f6ac..8963b8c5fb41 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3361,6 +3361,9 @@ int tcp_connect(struct sock *sk) struct sk_buff *buff; int err; + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) + return -EHOSTUNREACH; /* Routing failure or similar. 
*/ + tcp_connect_init(sk); if (unlikely(tp->repair)) { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 14672543cf0b..0733ea7e17cd 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -654,7 +654,8 @@ static void tcp_keepalive_timer (unsigned long data) goto death; } - if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) + if (!sock_flag(sk, SOCK_KEEPOPEN) || + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT))) goto out; elapsed = keepalive_time_when(tp); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1d6219bf2d6b..c991b97cbb28 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -802,7 +802,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); - else if (sk->sk_no_check_tx) { /* UDP csum disabled */ + else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ skb->ip_summed = CHECKSUM_NONE; goto send; @@ -1762,7 +1762,7 @@ static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) /* For TCP sockets, sk_rx_dst is protected by socket lock * For UDP, we use xchg() to guard against concurrent changes. */ -static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) +void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) { struct dst_entry *old; @@ -2120,6 +2120,7 @@ void udp_destroy_sock(struct sock *sk) encap_destroy(sk); } } +EXPORT_SYMBOL(udp_sk_rx_dst_set); /* * Socket option code for UDP diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 781250151d40..0932c85b42af 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -235,7 +235,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, if (uh->check == 0) uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_UNNECESSARY; /* If there is no outer header we can fake a checksum offload * due to the fact that we have already done the checksum in diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 1d2dbace42ff..39a44c0598f7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1912,15 +1912,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) if (dad_failed) ifp->flags |= IFA_F_DADFAILED; - if (ifp->flags&IFA_F_PERMANENT) { - spin_lock_bh(&ifp->lock); - addrconf_del_dad_work(ifp); - ifp->flags |= IFA_F_TENTATIVE; - spin_unlock_bh(&ifp->lock); - if (dad_failed) - ipv6_ifa_notify(0, ifp); - in6_ifa_put(ifp); - } else if (ifp->flags&IFA_F_TEMPORARY) { + if (ifp->flags&IFA_F_TEMPORARY) { struct inet6_ifaddr *ifpub; spin_lock_bh(&ifp->lock); ifpub = ifp->ifpub; @@ -1933,6 +1925,14 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) spin_unlock_bh(&ifp->lock); } ipv6_del_addr(ifp); + } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { + spin_lock_bh(&ifp->lock); + addrconf_del_dad_work(ifp); + ifp->flags |= IFA_F_TENTATIVE; + spin_unlock_bh(&ifp->lock); + if (dad_failed) + ipv6_ifa_notify(0, ifp); + in6_ifa_put(ifp); } else { ipv6_del_addr(ifp); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index e6b78ba0e636..cd8dd8c4e819 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -784,10 +784,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, goto next_iter; } - if (iter->dst.dev == rt->dst.dev && - iter->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&iter->rt6i_gateway, - &rt->rt6i_gateway)) { + if (rt6_duplicate_nexthop(iter, rt)) { if (rt->rt6i_nsiblings) rt->rt6i_nsiblings = 0; if (!(iter->rt6i_flags & RTF_EXPIRES)) 
@@ -915,6 +912,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, } nsiblings = iter->rt6i_nsiblings; fib6_purge_rt(iter, fn, info->nl_net); + if (fn->rr_ptr == iter) + fn->rr_ptr = NULL; rt6_release(iter); if (nsiblings) { @@ -927,6 +926,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, if (rt6_qualify_for_ecmp(iter)) { *ins = iter->dst.rt6_next; fib6_purge_rt(iter, fn, info->nl_net); + if (fn->rr_ptr == iter) + fn->rr_ptr = NULL; rt6_release(iter); nsiblings--; } else { @@ -1015,7 +1016,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, /* Create subtree root node */ sfn = node_alloc(); if (!sfn) - goto st_failure; + goto failure; sfn->leaf = info->nl_net->ipv6.ip6_null_entry; atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref); @@ -1031,12 +1032,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, if (IS_ERR(sn)) { /* If it is failed, discard just allocated - root, and then (in st_failure) stale node + root, and then (in failure) stale node in main tree. */ node_free(sfn); err = PTR_ERR(sn); - goto st_failure; + goto failure; } /* Now link new subtree to main tree */ @@ -1050,7 +1051,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, if (IS_ERR(sn)) { err = PTR_ERR(sn); - goto st_failure; + goto failure; } } @@ -1092,22 +1093,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, atomic_inc(&pn->leaf->rt6i_ref); } #endif - if (!(rt->dst.flags & DST_NOCACHE)) - dst_free(&rt->dst); + goto failure; } return err; -#ifdef CONFIG_IPV6_SUBTREES - /* Subtree creation failed, probably main tree node - is orphan. If it is, shoot it. +failure: + /* fn->leaf could be NULL if fn is an intermediate node and we + * failed to add the new route to it in both subtree creation + * failure and fib6_add_rt2node() failure case. + * In both cases, fib6_repair_tree() should be called to fix + * fn->leaf. */ -st_failure: if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) fib6_repair_tree(info->nl_net, fn); if (!(rt->dst.flags & DST_NOCACHE)) dst_free(&rt->dst); return err; -#endif } /* diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1699acb2fa2c..365d5108a326 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -673,8 +673,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } @@ -793,8 +791,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + hroom + troom, GFP_ATOMIC); if (!frag) { - IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } @@ -1390,11 +1386,12 @@ static int __ip6_append_data(struct sock *sk, */ cork->length += length; - if ((((length + (skb ? skb->len : headersize)) > mtu) || - (skb && skb_is_gso(skb))) && + if ((skb && skb_is_gso(skb)) || + (((length + (skb ? 
skb->len : headersize)) > mtu) && + (skb_queue_len(queue) <= 1) && (sk->sk_protocol == IPPROTO_UDP) && (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && - (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, exthdrlen, transhdrlen, mtu, flags, fl6); diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index e9065b8d3af8..abb2c307fbe8 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident); int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { - u16 offset = sizeof(struct ipv6hdr); + unsigned int offset = sizeof(struct ipv6hdr); unsigned int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); int found_rhdr = 0; @@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) while (offset <= packet_len) { struct ipv6_opt_hdr *exthdr; + unsigned int len; switch (**nexthdr) { @@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + offset); - offset += ipv6_optlen(exthdr); + len = ipv6_optlen(exthdr); + if (len + offset >= IPV6_MAXPLEN) + return -EINVAL; + offset += len; *nexthdr = &exthdr->nexthdr; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 322bd62e688b..aeb7097acc0a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2366,6 +2366,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu if (on_link) nrt->rt6i_flags &= ~RTF_GATEWAY; + nrt->rt6i_protocol = RTPROT_REDIRECT; nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; if (ip6_ins_rt(nrt)) @@ -2470,6 +2471,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net, .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), + .fc_protocol = RTPROT_RA, .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, @@ -2522,6 +2524,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, .fc_ifindex = dev->ifindex, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES | RTF_PREF(pref), + .fc_protocol = RTPROT_RA, .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = dev_net(dev), @@ -3048,17 +3051,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg) { struct rt6_nh *nh; - struct rt6_info *rtnh; int err = -EEXIST; list_for_each_entry(nh, rt6_nh_list, next) { /* check if rt6_info already exists */ - rtnh = nh->rt6_info; - - if (rtnh->dst.dev == rt->dst.dev && - rtnh->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&rtnh->rt6i_gateway, - &rt->rt6i_gateway)) + if (rt6_duplicate_nexthop(nh->rt6_info, rt)) return err; } @@ -3440,14 +3437,6 @@ static int rt6_fill_node(struct net *net, rtm->rtm_flags = 0; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; - if (rt->rt6i_flags & RTF_DYNAMIC) - rtm->rtm_protocol = RTPROT_REDIRECT; - else if (rt->rt6i_flags & RTF_ADDRCONF) { - if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO)) - rtm->rtm_protocol = RTPROT_RA; - else - rtm->rtm_protocol = RTPROT_KERNEL; - } if (rt->rt6i_flags & RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5abc3692b901..ca7895454cec 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -215,6 +215,7 @@ struct sock 
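The ip6_find_1stfragopt() hunk above fixes two related problems: the u16 accumulator could wrap when summing attacker-controlled extension-header lengths, and nothing bounded the walk by the maximum IPv6 payload. A standalone sketch of the same guard, with invented names (find_payload is not a kernel function):

#include <stdio.h>

#define IPV6_MAXPLEN 65535

/* Walk a chain of extension headers whose length fields are
 * attacker-controlled; return payload offset or -1.  With the
 * old u16 accumulator the sum could wrap past 65535 and point
 * back inside the packet; the bound check also caps the walk. */
static int find_payload(const unsigned int *lens, int n)
{
        unsigned int offset = 40;       /* sizeof(struct ipv6hdr) */
        int i;

        for (i = 0; i < n; i++) {
                if (lens[i] + offset >= IPV6_MAXPLEN)
                        return -1;      /* implausible chain */
                offset += lens[i];
        }
        return (int)offset;
}

int main(void)
{
        unsigned int evil[64];
        int i;

        for (i = 0; i < 64; i++)
                evil[i] = 2048;         /* 64 * 2048 would wrap a u16 */
        printf("result: %d\n", find_payload(evil, 64));   /* -1 */
        return 0;
}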
*cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; treq->ts_off = 0; + treq->txhash = net_tx_rndhash(); /* * We need to lookup the dst_entry to get the correct window size. diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 75703fda23e7..592270c310f4 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -291,11 +291,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, struct udp_table *udptable) { const struct ipv6hdr *iph = ipv6_hdr(skb); - struct sock *sk; - sk = skb_steal_sock(skb); - if (unlikely(sk)) - return sk; return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable, skb); @@ -798,6 +794,24 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (udp6_csum_init(skb, uh, proto)) goto csum_error; + /* Check if the socket is already available, e.g. due to early demux */ + sk = skb_steal_sock(skb); + if (sk) { + struct dst_entry *dst = skb_dst(skb); + int ret; + + if (unlikely(sk->sk_rx_dst != dst)) + udp_sk_rx_dst_set(sk, dst); + + ret = udpv6_queue_rcv_skb(sk, skb); + sock_put(sk); + + /* a return value > 0 means to resubmit the input */ + if (ret > 0) + return ret; + return 0; + } + /* * Multicast receive code */ @@ -806,11 +820,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, saddr, daddr, udptable, proto); /* Unicast */ - - /* - * check socket cache ... must talk to Alan about his plans - * for sock caches... i'll skip this for now. - */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk) { int ret; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index a2267f80febb..e7d378c032cb 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -72,7 +72,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, if (uh->check == 0) uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; + skb->ip_summed = CHECKSUM_UNNECESSARY; /* If there is no outer header we can fake a checksum offload * due to the fact that we have already done the checksum in diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 8d77ad5cadaf..4cadc29f547c 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -2225,7 +2225,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); - struct irda_device_list list; + struct irda_device_list list = { 0 }; struct irda_device_info *discoveries; struct irda_ias_set * ias_opt; /* IAS get/query params */ struct ias_object * ias_obj; /* Object in IAS */ diff --git a/net/key/af_key.c b/net/key/af_key.c index b1432b668033..166e32c93038 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, #define BROADCAST_ONE 1 #define BROADCAST_REGISTERED 2 #define BROADCAST_PROMISC_ONLY 4 -static int pfkey_broadcast(struct sk_buff *skb, +static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, int broadcast_flags, struct sock *one_sk, struct net *net) { @@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb, rcu_read_unlock(); if (one_sk != NULL) - err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); + err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); kfree_skb(skb2); kfree_skb(skb); @@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) hdr = (struct sadb_msg *) pfk->dump.skb->data; hdr->sadb_msg_seq = 0; hdr->sadb_msg_errno = rc; - 
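The one-line irda_getsockopt() change above ("list = { 0 }") belongs to a well-known bug class: a partially written struct copied out to userspace leaks whatever stale bytes occupied its padding and untouched fields. A small userspace demonstration, all names invented:

#include <stdio.h>
#include <string.h>

struct reply {
        unsigned char len;
        /* 3 bytes of padding live here */
        unsigned int  ids[4];
};

static void fill_reply(struct reply *r, int zero_first)
{
        if (zero_first)
                memset(r, 0, sizeof(*r));   /* the "= { 0 }" in the fix */
        r->len = 2;
        r->ids[0] = 1;
        r->ids[1] = 2;
        /* ids[2], ids[3] and the padding stay uninitialized in the
         * buggy variant and would be copied out wholesale */
}

int main(void)
{
        struct reply r;

        memset(&r, 0xAA, sizeof(r));        /* simulate stale stack */
        fill_reply(&r, 0);
        printf("buggy: ids[3]=0x%x\n", r.ids[3]);   /* 0xaaaaaaaa */
        fill_reply(&r, 1);
        printf("fixed: ids[3]=0x%x\n", r.ids[3]);   /* 0 */
        return 0;
}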
pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = NULL; } @@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); return 0; } @@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ xfrm_state_put(x); - pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); + pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); return 0; } @@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; - pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); return 0; } @@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; - pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); return 0; } @@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad return -ENOBUFS; } - pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); - + pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, + sock_net(sk)); return 0; } @@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); - return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, + sock_net(sk)); } static int key_notify_sa_flush(const struct km_event *c) @@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c) hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; - pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } @@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) out_hdr->sadb_msg_pid = pfk->dump.msg_portid; if (pfk->dump.skb) - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = out_skb; @@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb new_hdr->sadb_msg_errno = 0; } - pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); return 0; } @@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_seq = c->seq; out_hdr->sadb_msg_pid = c->portid; - pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); return 0; } @@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; - pfkey_broadcast(out_skb, 
BROADCAST_ONE, sk, xp_net(xp)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); err = 0; out: @@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) out_hdr->sadb_msg_pid = pfk->dump.msg_portid; if (pfk->dump.skb) - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = out_skb; @@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; - pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); + pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } @@ -2816,7 +2817,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb void *ext_hdrs[SADB_EXT_MAX]; int err; - pfkey_broadcast(skb_clone(skb, GFP_KERNEL), + pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); memset(ext_hdrs, 0, sizeof(ext_hdrs)); @@ -3038,7 +3039,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) out_hdr->sadb_msg_seq = 0; out_hdr->sadb_msg_pid = 0; - pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); return 0; } @@ -3228,7 +3230,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_ctx->ctx_len); } - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); } static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, @@ -3426,7 +3429,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, n_port->sadb_x_nat_t_port_port = sport; n_port->sadb_x_nat_t_port_reserved = 0; - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, + xs_net(x)); } #ifdef CONFIG_NET_KEY_MIGRATE @@ -3618,7 +3622,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, } /* broadcast migrate message to sockets */ - pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); return 0; diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index e03d16ed550d..899c2c36da13 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -422,7 +422,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { if (expect_matches(i, expect)) { - if (nf_ct_remove_expect(expect)) + if (nf_ct_remove_expect(i)) break; } else if (expect_clash(i, expect)) { ret = -EBUSY; diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 6c72922d20ca..b93a46ef812d 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -222,20 +222,21 @@ find_appropriate_src(struct net *net, .tuple = tuple, .zone = zone }; - struct rhlist_head *hl; + struct rhlist_head *hl, *h; hl = rhltable_lookup(&nf_nat_bysource_table, &key, nf_nat_bysource_params); - if (!hl) - return 0; - ct = container_of(hl, typeof(*ct), nat_bysource); + rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) 
{ + nf_ct_invert_tuplepr(result, + &ct->tuplehash[IP_CT_DIR_REPLY].tuple); + result->dst = tuple->dst; - nf_ct_invert_tuplepr(result, - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); - result->dst = tuple->dst; + if (in_range(l3proto, l4proto, result, range)) + return 1; + } - return in_range(l3proto, l4proto, result, range); + return 0; } /* For [FUTURE] fragmentation handling, we want the least-used diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 80f5ecf2c3d7..ff1f4ce6fba4 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -463,8 +463,7 @@ static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh) if (msglen > skb->len) msglen = skb->len; - if (nlh->nlmsg_len < NLMSG_HDRLEN || - skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) + if (skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg)) return; err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy, @@ -491,7 +490,8 @@ static void nfnetlink_rcv(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < NLMSG_HDRLEN || + if (skb->len < NLMSG_HDRLEN || + nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return; diff --git a/net/nfc/core.c b/net/nfc/core.c index 122bb81da918..5cf33df888c3 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -982,6 +982,8 @@ static void nfc_release(struct device *d) kfree(se); } + ida_simple_remove(&nfc_index_ida, dev->idx); + kfree(dev); } @@ -1056,6 +1058,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, int tx_headroom, int tx_tailroom) { struct nfc_dev *dev; + int rc; if (!ops->start_poll || !ops->stop_poll || !ops->activate_target || !ops->deactivate_target || !ops->im_transceive) @@ -1068,6 +1071,15 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, if (!dev) return NULL; + rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL); + if (rc < 0) + goto err_free_dev; + dev->idx = rc; + + dev->dev.class = &nfc_class; + dev_set_name(&dev->dev, "nfc%d", dev->idx); + device_initialize(&dev->dev); + dev->ops = ops; dev->supported_protocols = supported_protocols; dev->tx_headroom = tx_headroom; @@ -1090,6 +1102,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops, } return dev; + +err_free_dev: + kfree(dev); + + return ERR_PTR(rc); } EXPORT_SYMBOL(nfc_allocate_device); @@ -1104,14 +1121,6 @@ int nfc_register_device(struct nfc_dev *dev) pr_debug("dev_name=%s\n", dev_name(&dev->dev)); - dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL); - if (dev->idx < 0) - return dev->idx; - - dev->dev.class = &nfc_class; - dev_set_name(&dev->dev, "nfc%d", dev->idx); - device_initialize(&dev->dev); - mutex_lock(&nfc_devlist_mutex); nfc_devlist_generation++; rc = device_add(&dev->dev); @@ -1149,12 +1158,10 @@ EXPORT_SYMBOL(nfc_register_device); */ void nfc_unregister_device(struct nfc_dev *dev) { - int rc, id; + int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); - id = dev->idx; - if (dev->rfkill) { rfkill_unregister(dev->rfkill); rfkill_destroy(dev->rfkill); @@ -1179,8 +1186,6 @@ void nfc_unregister_device(struct nfc_dev *dev) nfc_devlist_generation++; device_del(&dev->dev); mutex_unlock(&nfc_devlist_mutex); - - ida_simple_remove(&nfc_index_ida, id); } EXPORT_SYMBOL(nfc_unregister_device); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 2ffb18e73df6..fb7afcaa3004 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -77,7 +77,8 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) struct sockaddr_nfc_llcp llcp_addr; int len, ret 
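On the find_appropriate_src() rework just above: rhltable_lookup() hands back a list of entries that share a hash key, and the old code inspected only the head, missing valid mappings further down the chain. A simplified sketch of the same walk-the-whole-chain lookup over a plain linked list (names are illustrative):

#include <stdio.h>
#include <stddef.h>

struct mapping {
        int port;
        struct mapping *next;
};

static int in_range(const struct mapping *m, int lo, int hi)
{
        return m->port >= lo && m->port <= hi;
}

/* fixed behaviour: examine every entry, not just the head */
static const struct mapping *find(const struct mapping *head,
                                  int lo, int hi)
{
        for (const struct mapping *m = head; m; m = m->next)
                if (in_range(m, lo, hi))
                        return m;
        return NULL;
}

int main(void)
{
        struct mapping c = { 1200, NULL };
        struct mapping b = { 80,   &c };
        struct mapping a = { 9999, &b };    /* head is out of range */
        const struct mapping *m = find(&a, 1000, 2000);

        printf("found port %d\n", m ? m->port : -1);   /* 1200 */
        return 0;
}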
= 0; - if (!addr || addr->sa_family != AF_NFC) + if (!addr || alen < offsetofend(struct sockaddr, sa_family) || + addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); @@ -151,7 +152,8 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; - if (!addr || addr->sa_family != AF_NFC) + if (!addr || alen < offsetofend(struct sockaddr, sa_family) || + addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); @@ -662,8 +664,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); - if (!addr || len < sizeof(struct sockaddr_nfc) || - addr->sa_family != AF_NFC) + if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC) return -EINVAL; if (addr->service_name_len == 0 && addr->dsap == 0) diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 61fff422424f..85a3d9ed4c29 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1173,8 +1173,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops, return ndev; free_nfc: - kfree(ndev->nfc_dev); - + nfc_free_device(ndev->nfc_dev); free_nci: kfree(ndev); return NULL; diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 6b0850e63e09..b251fb936a27 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -907,7 +907,9 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info) u32 device_idx, target_idx, protocol; int rc; - if (!info->attrs[NFC_ATTR_DEVICE_INDEX]) + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] || + !info->attrs[NFC_ATTR_TARGET_INDEX] || + !info->attrs[NFC_ATTR_PROTOCOLS]) return -EINVAL; device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index e4610676299b..a54a556fcdb5 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1337,6 +1337,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, goto out; } + OVS_CB(skb)->acts_origlen = acts->orig_len; err = do_execute_actions(dp, skb, key, acts->actions, acts->actions_len); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 08679ebb3068..b3bf66bbf4dc 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1289,8 +1289,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, nla_for_each_nested(a, attr, rem) { int type = nla_type(a); - int maxlen = ovs_ct_attr_lens[type].maxlen; - int minlen = ovs_ct_attr_lens[type].minlen; + int maxlen; + int minlen; if (type > OVS_CT_ATTR_MAX) { OVS_NLERR(log, @@ -1298,6 +1298,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, type, OVS_CT_ATTR_MAX); return -EINVAL; } + + maxlen = ovs_ct_attr_lens[type].maxlen; + minlen = ovs_ct_attr_lens[type].minlen; if (nla_len(a) < minlen || nla_len(a) > maxlen) { OVS_NLERR(log, "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)", diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 7b17da9a94a0..57ce10b6cf6b 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -381,7 +381,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, } static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, - unsigned int hdrlen) + unsigned int hdrlen, int actions_attrlen) { size_t size = NLMSG_ALIGN(sizeof(struct 
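The llcp bind() hunks above add the canonical sockaddr length check: the kernel must verify that the caller supplied at least enough bytes to cover sa_family before reading it. A userspace sketch of the pattern; offsetofend() is reimplemented locally since it is a kernel helper:

#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

static int my_bind(const struct sockaddr *addr, socklen_t alen)
{
        /* reject before touching sa_family */
        if (!addr || alen < offsetofend(struct sockaddr, sa_family))
                return -22;     /* -EINVAL */
        if (addr->sa_family != AF_UNIX)
                return -22;
        return 0;
}

int main(void)
{
        struct sockaddr sa = { .sa_family = AF_UNIX };

        printf("%d\n", my_bind(&sa, 1));            /* -22: too short */
        printf("%d\n", my_bind(&sa, sizeof(sa)));   /* 0 */
        return 0;
}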
ovs_header)) + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ @@ -398,7 +398,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, /* OVS_PACKET_ATTR_ACTIONS */ if (upcall_info->actions_len) - size += nla_total_size(upcall_info->actions_len); + size += nla_total_size(actions_attrlen); /* OVS_PACKET_ATTR_MRU */ if (upcall_info->mru) @@ -465,7 +465,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, else hlen = skb->len; - len = upcall_msg_size(upcall_info, hlen - cutlen); + len = upcall_msg_size(upcall_info, hlen - cutlen, + OVS_CB(skb)->acts_origlen); user_skb = genlmsg_new(len, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index da931bdef8a7..98a28f78aff2 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -98,12 +98,14 @@ struct datapath { * @input_vport: The original vport packet came in on. This value is cached * when a packet is received by OVS. * @mru: The maximum received fragement size; 0 if the packet is not + * @acts_origlen: The netlink size of the flow actions applied to this skb. * @cutlen: The number of bytes from the packet end to be removed. * fragmented. */ struct ovs_skb_cb { struct vport *input_vport; u16 mru; + u16 acts_origlen; u32 cutlen; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e3eeed19cc7a..aa2d4000bafc 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3705,14 +3705,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; - po->tp_reserve = val; - return 0; + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_reserve = val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_LOSS: { @@ -4334,7 +4339,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, register_prot_hook(sk); } spin_unlock(&po->bind_lock); - if (closing && (po->tp_version > TPACKET_V2)) { + if (pg_vec && (po->tp_version > TPACKET_V2)) { /* Because we don't support block-based V3 on tx-ring */ if (!tx_ring) prb_shutdown_retire_blk_timer(po, rb_queue); diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 507678853e6c..9a1798d6ae94 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -125,7 +125,7 @@ int rds_tcp_accept_one(struct socket *sock) if (!sock) /* module unload or netns delete in progress */ return -ENETUNREACH; - ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, + ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, sock->sk->sk_protocol, &new_sock); if (ret) diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 36f0ced9e60c..541707802a23 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -36,11 +36,12 @@ static struct tc_action_ops act_ipt_ops; static unsigned int xt_net_id; static struct tc_action_ops act_xt_ops; -static int ipt_init_target(struct xt_entry_target *t, char *table, - unsigned int hook) +static int ipt_init_target(struct net *net, struct xt_entry_target *t, + char *table, unsigned int hook) { struct xt_tgchk_param par; struct xt_target *target; + struct ipt_entry e = {}; int ret = 0; target = xt_request_find_target(AF_INET, t->u.user.name, @@ -49,8 +50,10 
@@ static int ipt_init_target(struct xt_entry_target *t, char *table, return PTR_ERR(target); t->u.kernel.target = target; + memset(&par, 0, sizeof(par)); + par.net = net; par.table = table; - par.entryinfo = NULL; + par.entryinfo = &e; par.target = target; par.targinfo = t->data; par.hook_mask = hook; @@ -91,10 +94,11 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = { [TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) }, }; -static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, +static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, struct nlattr *est, struct tc_action **a, const struct tc_action_ops *ops, int ovr, int bind) { + struct tc_action_net *tn = net_generic(net, id); struct nlattr *tb[TCA_IPT_MAX + 1]; struct tcf_ipt *ipt; struct xt_entry_target *td, *t; @@ -159,7 +163,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla, if (unlikely(!t)) goto err2; - err = ipt_init_target(t, tname, hook); + err = ipt_init_target(net, t, tname, hook); if (err < 0) goto err3; @@ -193,18 +197,16 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind) { - struct tc_action_net *tn = net_generic(net, ipt_net_id); - - return __tcf_ipt_init(tn, nla, est, a, &act_ipt_ops, ovr, bind); + return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, + bind); } static int tcf_xt_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, int ovr, int bind) { - struct tc_action_net *tn = net_generic(net, xt_net_id); - - return __tcf_ipt_init(tn, nla, est, a, &act_xt_ops, ovr, bind); + return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, + bind); } static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a, diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index cfdbfa18a95e..fdbbdfd8e9a8 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -286,9 +286,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) void qdisc_hash_add(struct Qdisc *q, bool invisible) { if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { - struct Qdisc *root = qdisc_dev(q)->qdisc; - - WARN_ON_ONCE(root == &noop_qdisc); ASSERT_RTNL(); hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle); if (invisible) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 332d94be6e1c..22451a9eb89d 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -435,6 +435,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) qdisc_drop(head, sch, to_free); slot_queue_add(slot, skb); + qdisc_tree_reduce_backlog(sch, 0, delta); return NET_XMIT_CN; } @@ -466,8 +467,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) /* Return Congestion Notification only if we dropped a packet * from this flow. 
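For the sfq_enqueue() changes nearby: a qdisc that drops packets it had already queued must propagate the freed byte count to every ancestor via qdisc_tree_reduce_backlog(), or the parents' backlog counters drift upward forever. A toy model of that bookkeeping (structure and names invented, bytes only for brevity):

#include <stdio.h>

struct qdisc {
        const char *name;
        unsigned int backlog;   /* bytes */
        struct qdisc *parent;
};

static void tree_reduce_backlog(struct qdisc *q, unsigned int bytes)
{
        /* mirrors qdisc_tree_reduce_backlog(): walk up the tree */
        for (struct qdisc *p = q->parent; p; p = p->parent)
                p->backlog -= bytes;
}

int main(void)
{
        struct qdisc root = { "root", 1500, NULL };
        struct qdisc sfq  = { "sfq",  1500, &root };

        /* sfq drops a queued 500-byte packet to admit a new one */
        sfq.backlog -= 500;
        tree_reduce_backlog(&sfq, 500);   /* the call the fix adds */

        printf("root=%u sfq=%u\n", root.backlog, sfq.backlog);
        return 0;
}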
*/ - if (qlen != slot->qlen) + if (qlen != slot->qlen) { + qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb)); return NET_XMIT_CN; + } /* As we dropped a packet, better let upper stack know this */ qdisc_tree_reduce_backlog(sch, 1, dropped); diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f5b45b8b8b16..0de5f5f8ddbc 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -510,7 +510,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, { addr->sa.sa_family = AF_INET6; addr->v6.sin6_port = port; + addr->v6.sin6_flowinfo = 0; addr->v6.sin6_addr = *saddr; + addr->v6.sin6_scope_id = 0; } /* Compare addresses exactly. diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 92e332e17391..961a6f81ae64 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -228,7 +228,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; - __u8 extensions[3]; + __u8 extensions[4]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL; @@ -396,7 +396,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; - __u8 extensions[3]; + __u8 extensions[4]; sctp_paramhdr_t *auth_chunks = NULL, *auth_hmacs = NULL, *auth_random = NULL; diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index fb39284ec174..12649c9fedab 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -34,6 +34,7 @@ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ +#include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/skcipher.h> #include <linux/err.h> @@ -927,7 +928,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, if (ret) goto out_err; - if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { + if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { ret = GSS_S_BAD_SIG; goto out_err; } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 2b720fa35c4f..e18500151236 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -421,6 +421,9 @@ static void svc_data_ready(struct sock *sk) dprintk("svc: socket %p(inet %p), busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); + + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_odata(sk); if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) svc_xprt_enqueue(&svsk->sk_xprt); @@ -437,6 +440,9 @@ static void svc_write_space(struct sock *sk) if (svsk) { dprintk("svc: socket %p(inet %p), write_space busy=%d\n", svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)); + + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_owspace(sk); svc_xprt_enqueue(&svsk->sk_xprt); } @@ -760,8 +766,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk) dprintk("svc: socket %p TCP (listen) state change %d\n", sk, sk->sk_state); - if (svsk) + if (svsk) { + /* Refer to svc_setup_socket() for details. */ + rmb(); svsk->sk_odata(sk); + } + /* * This callback may called twice when a new connection * is established as a child socket inherits everything @@ -794,6 +804,8 @@ static void svc_tcp_state_change(struct sock *sk) if (!svsk) printk("svc: socket %p: no user data\n", sk); else { + /* Refer to svc_setup_socket() for details. 
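On the gss_krb5_aes_decrypt() hunk above, which replaces memcmp() with crypto_memneq(): an early-exit compare leaks, through timing, how many leading MAC bytes matched. A minimal constant-time comparison in the same spirit (ct_memneq is an invented name, not the kernel API):

#include <stdio.h>
#include <stddef.h>

static int ct_memneq(const void *a, const void *b, size_t n)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;

        for (size_t i = 0; i < n; i++)
                diff |= pa[i] ^ pb[i];   /* no early exit */
        return diff != 0;
}

int main(void)
{
        unsigned char mac[12]  = "0123456789a";
        unsigned char good[12] = "0123456789a";
        unsigned char bad[12]  = "0123456789b";

        printf("%d %d\n", ct_memneq(mac, good, 12),
                          ct_memneq(mac, bad, 12));   /* 0 1 */
        return 0;
}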
*/ + rmb(); svsk->sk_ostate(sk); if (sk->sk_state != TCP_ESTABLISHED) { set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); @@ -1381,12 +1393,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, return ERR_PTR(err); } - inet->sk_user_data = svsk; svsk->sk_sock = sock; svsk->sk_sk = inet; svsk->sk_ostate = inet->sk_state_change; svsk->sk_odata = inet->sk_data_ready; svsk->sk_owspace = inet->sk_write_space; + /* + * This barrier is necessary in order to prevent race condition + * with svc_data_ready(), svc_listen_data_ready() and others + * when calling callbacks above. + */ + wmb(); + inet->sk_user_data = svsk; /* Initialize the socket */ if (sock->type == SOCK_DGRAM) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d5b54c020dec..4f154d388748 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1624,6 +1624,8 @@ static void xs_tcp_state_change(struct sock *sk) if (test_and_clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state)) xprt_clear_connecting(xprt); + if (sk->sk_err) + xprt_wake_pending_tasks(xprt, -sk->sk_err); xs_sock_mark_closed(xprt); } out: diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 9bfe886ab330..750949dfc1d7 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, arg = nlmsg_new(0, GFP_KERNEL); if (!arg) { kfree_skb(msg->rep); + msg->rep = NULL; return -ENOMEM; } err = __tipc_nl_compat_dumpit(cmd, msg, arg); - if (err) + if (err) { kfree_skb(msg->rep); - + msg->rep = NULL; + } kfree_skb(arg); return err; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c3bc9da30cff..571aed1e6f36 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -291,8 +291,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, - .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, @@ -348,6 +347,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, [NL80211_ATTR_ACL_POLICY] = {. 
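The wmb() added in svc_setup_socket() above pairs with the rmb() calls in the callbacks: all fields of svsk must be globally visible before the pointer is published in sk_user_data, and readers must order the pointer load before the field loads. A userspace model of that pairing using C11 release/acquire semantics (struct and names are borrowed loosely from the patch; the code itself is only illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct svc_sock { void (*sk_odata)(void); };

static void odata(void) { puts("data ready"); }

static struct svc_sock svsk;
static _Atomic(struct svc_sock *) sk_user_data;

static void setup(void)             /* plays svc_setup_socket() */
{
        svsk.sk_odata = odata;
        /* release = wmb(): fields become visible before pointer */
        atomic_store_explicit(&sk_user_data, &svsk,
                              memory_order_release);
}

static void data_ready(void)        /* plays svc_data_ready() */
{
        /* acquire = rmb(): pointer load ordered before field loads */
        struct svc_sock *s =
                atomic_load_explicit(&sk_user_data,
                                     memory_order_acquire);
        if (s)
                s->sk_odata();
}

int main(void)
{
        setup();
        data_ready();
        return 0;
}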
type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, @@ -520,7 +520,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = { static const struct nla_policy nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = { [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 }, - [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY, + [NL80211_NAN_FUNC_SERVICE_ID] = { .len = NL80211_NAN_FUNC_SERVICE_ID_LEN }, [NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 }, [NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG }, @@ -6469,6 +6469,10 @@ static int validate_scan_freqs(struct nlattr *freqs) struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; + nla_for_each_nested(attr1, freqs, tmp1) + if (nla_len(attr1) != sizeof(u32)) + return 0; + nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 4b9569fa931b..33e50614a5d9 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3541,7 +3541,7 @@ sub process { $fixedline =~ s/\s*=\s*$/ = {/; fix_insert_line($fixlinenr, $fixedline); $fixedline = $line; - $fixedline =~ s/^(.\s*){\s*/$1/; + $fixedline =~ s/^(.\s*)\{\s*/$1/; fix_insert_line($fixlinenr, $fixedline); } } @@ -3882,7 +3882,7 @@ sub process { my $fixedline = rtrim($prevrawline) . " {"; fix_insert_line($fixlinenr, $fixedline); $fixedline = $rawline; - $fixedline =~ s/^(.\s*){\s*/$1\t/; + $fixedline =~ s/^(.\s*)\{\s*/$1\t/; if ($fixedline !~ /^\+\s*$/) { fix_insert_line($fixlinenr, $fixedline); } @@ -4371,7 +4371,7 @@ sub process { if (ERROR("SPACING", "space required before the open brace '{'\n" . $herecurr) && $fix) { - $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/; + $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/; } } diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff index ec47f95991a3..586cccea46ce 100755 --- a/scripts/dtc/dtx_diff +++ b/scripts/dtc/dtx_diff @@ -321,7 +321,7 @@ fi cpp_flags="\ -nostdinc \ -I${srctree}/arch/${ARCH}/boot/dts \ - -I${srctree}/arch/${ARCH}/boot/dts/include \ + -I${srctree}/scripts/dtc/include-prefixes \ -I${srctree}/drivers/of/testcase-data \ -undef -D__DTS__" diff --git a/security/keys/compat_dh.c b/security/keys/compat_dh.c index a6a659b6bcb6..aa6b34cafe5f 100644 --- a/security/keys/compat_dh.c +++ b/security/keys/compat_dh.c @@ -33,6 +33,8 @@ long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params, kdfcopy.hashname = compat_ptr(compat_kdfcopy.hashname); kdfcopy.otherinfo = compat_ptr(compat_kdfcopy.otherinfo); kdfcopy.otherinfolen = compat_kdfcopy.otherinfolen; + memcpy(kdfcopy.__spare, compat_kdfcopy.__spare, + sizeof(kdfcopy.__spare)); return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy); } diff --git a/security/keys/dh.c b/security/keys/dh.c index 4755d4b4f945..d1ea9f325f94 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c @@ -266,6 +266,11 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, if (kdfcopy) { char *hashname; + if (memchr_inv(kdfcopy->__spare, 0, sizeof(kdfcopy->__spare))) { + ret = -EINVAL; + goto out1; + } + if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN || kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) { ret = -EMSGSIZE; diff --git a/sound/core/control.c b/sound/core/control.c index c109b82eef4b..7b43b0f74b84 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1157,7 +1157,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, mutex_lock(&ue->card->user_ctl_lock); change = ue->tlv_data_size != size; if (!change) - 
change = memcmp(ue->tlv_data, new_data, size); + change = memcmp(ue->tlv_data, new_data, size) != 0; kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index f3b1d7f50b81..67c4c68ce041 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1502,16 +1502,11 @@ static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client, static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg) { struct snd_seq_queue_info *info = arg; - int result; struct snd_seq_queue *q; - result = snd_seq_queue_alloc(client->number, info->locked, info->flags); - if (result < 0) - return result; - - q = queueptr(result); - if (q == NULL) - return -EINVAL; + q = snd_seq_queue_alloc(client->number, info->locked, info->flags); + if (IS_ERR(q)) + return PTR_ERR(q); info->queue = q->queue; info->locked = q->locked; @@ -1521,7 +1516,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg) if (!info->name[0]) snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); strlcpy(q->name, info->name, sizeof(q->name)); - queuefree(q); + snd_use_lock_free(&q->use_lock); return 0; } diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 450c5187eecb..79e0c5604ef8 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void) static void queue_use(struct snd_seq_queue *queue, int client, int use); /* allocate a new queue - - * return queue index value or negative value for error + * return pointer to new queue or ERR_PTR(-errno) for error + * The new queue's use_lock is set to 1. It is the caller's responsibility to + * call snd_use_lock_free(&q->use_lock). */ -int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) +struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags) { struct snd_seq_queue *q; q = queue_new(client, locked); if (q == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); q->info_flags = info_flags; queue_use(q, client, 1); + snd_use_lock_use(&q->use_lock); if (queue_list_add(q) < 0) { + snd_use_lock_free(&q->use_lock); queue_delete(q); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } - return q->queue; + return q; } /* delete a queue - queue must be owned by the client */ diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h index 30c8111477f6..719093489a2c 100644 --- a/sound/core/seq/seq_queue.h +++ b/sound/core/seq/seq_queue.h @@ -71,7 +71,7 @@ void snd_seq_queues_delete(void); /* create new queue (constructor) */ -int snd_seq_queue_alloc(int client, int locked, unsigned int flags); +struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags); /* delete queue (destructor) */ int snd_seq_queue_delete(int client, int queueid); diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c index f0e4d502d604..066b5df666f4 100644 --- a/sound/firewire/iso-resources.c +++ b/sound/firewire/iso-resources.c @@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update); */ void fw_iso_resources_free(struct fw_iso_resources *r) { - struct fw_card *card = fw_parent_device(r->unit)->card; + struct fw_card *card; int bandwidth, channel; + /* Not initialized. 
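The snd_seq_queue_alloc() rework above switches from returning an index to returning a queue pointer or an ERR_PTR()-encoded errno. A userspace rendition of that convention, with the kernel helpers reimplemented locally (assumes the usual LP64 model where a pointer can hold a long):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        /* the top MAX_ERRNO addresses are never valid objects */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct queue { int id; };

static struct queue *queue_alloc(int fail)
{
        struct queue *q;

        if (fail)
                return ERR_PTR(-12);    /* -ENOMEM */
        q = malloc(sizeof(*q));
        if (!q)
                return ERR_PTR(-12);
        q->id = 1;
        return q;
}

int main(void)
{
        struct queue *q = queue_alloc(1);

        if (IS_ERR(q))
                printf("alloc failed: %ld\n", PTR_ERR(q));
        q = queue_alloc(0);
        if (!IS_ERR(q))
                printf("queue %d\n", q->id);
        free(q);
        return 0;
}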
*/ + if (r->unit == NULL) + return; + card = fw_parent_device(r->unit)->card; + mutex_lock(&r->mutex); if (r->allocated) { diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c index bf779cfeef0d..59a270406353 100644 --- a/sound/firewire/motu/motu.c +++ b/sound/firewire/motu/motu.c @@ -128,6 +128,7 @@ static void do_registration(struct work_struct *work) return; error: snd_motu_transaction_unregister(motu); + snd_motu_stream_destroy_duplex(motu); snd_card_free(motu->card); dev_info(&motu->unit->device, "Sound card registration failed: %d\n", err); diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c index c47287d79306..a178e0d03088 100644 --- a/sound/pci/fm801.c +++ b/sound/pci/fm801.c @@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card, } } - snd_fm801_chip_init(chip); - if ((chip->tea575x_tuner & TUNER_ONLY) == 0) { if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { @@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card, pci_set_master(pci); } + snd_fm801_chip_init(chip); + if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_fm801_free(chip); return err; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 63bc894ddf5e..a81aacf684b2 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -933,6 +933,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), @@ -946,6 +947,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI), SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 90e4ff87445e..c87ff8e5d1d5 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3757,11 +3757,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI", patch_atihdmi), HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), +HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x), HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x), HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x), +HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0009, "GPU 09 
HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi), @@ -3788,17 +3792,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi), +HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), +HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index cbeebc0a9711..606d5333ff98 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2296,6 +2296,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), @@ -4964,6 +4965,7 @@ enum { ALC233_FIXUP_ASUS_MIC_NO_PRESENCE, 
ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE, ALC233_FIXUP_LENOVO_MULTI_CODECS, + ALC294_FIXUP_LENOVO_MIC_LOCATION, }; static const struct hda_fixup alc269_fixups[] = { @@ -5737,6 +5739,18 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_alc662_fixup_lenovo_dual_codecs, }, + [ALC294_FIXUP_LENOVO_MIC_LOCATION] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* Change the mic location from front to right, otherwise there are + two front mics with the same name, pulseaudio can't handle them. + This is just a temporary workaround, after applying this fixup, + there will be one "Front Mic" and one "Mic" in this machine. + */ + { 0x1a, 0x04a19040 }, + { } + }, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -5918,6 +5932,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c index a72c7d642026..3a1393283156 100644 --- a/sound/soc/atmel/tse850-pcm5142.c +++ b/sound/soc/atmel/tse850-pcm5142.c @@ -227,7 +227,7 @@ int tse850_put_ana(struct snd_kcontrol *kctrl, static const char * const mux_text[] = { "Mixer", "Loop" }; static const struct soc_enum mux_enum = - SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 2, mux_text); + SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(mux_text), mux_text); static const struct snd_kcontrol_new mux1 = SOC_DAPM_ENUM_EXT("MUX1", mux_enum, tse850_get_mux1, tse850_put_mux1); @@ -252,7 +252,7 @@ static const char * const ana_text[] = { }; static const struct soc_enum ana_enum = - SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 9, ana_text); + SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ana_text), ana_text); static const struct snd_kcontrol_new out = SOC_DAPM_ENUM_EXT("ANA", ana_enum, tse850_get_ana, tse850_put_ana); diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index bfd71b873ca2..206f36bf43e8 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c @@ -81,7 +81,8 @@ static int soc_compr_open(struct snd_compr_stream *cstream) static int soc_compr_open_fe(struct snd_compr_stream *cstream) { struct snd_soc_pcm_runtime *fe = cstream->private_data; - struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream; + struct snd_pcm_substream *fe_substream = + fe->pcm->streams[cstream->direction].substream; struct snd_soc_platform *platform = fe->platform; struct snd_soc_dai *cpu_dai = fe->cpu_dai; struct snd_soc_dpcm *dpcm; @@ -467,7 +468,8 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream, struct snd_compr_params *params) { struct snd_soc_pcm_runtime *fe = cstream->private_data; - struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream; + struct snd_pcm_substream *fe_substream = + fe->pcm->streams[cstream->direction].substream; struct snd_soc_platform *platform = fe->platform; struct snd_soc_dai *cpu_dai = fe->cpu_dai; int ret = 0, stream; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 
754e3ef8d7ae..d05acc8eed1f 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3139,8 +3139,6 @@ static int snd_soc_component_initialize(struct snd_soc_component *component, component->remove = component->driver->remove; component->suspend = component->driver->suspend; component->resume = component->driver->resume; - component->pcm_new = component->driver->pcm_new; - component->pcm_free = component->driver->pcm_free; dapm = &component->dapm; dapm->dev = dev; @@ -3328,25 +3326,6 @@ static void snd_soc_platform_drv_remove(struct snd_soc_component *component) platform->driver->remove(platform); } -static int snd_soc_platform_drv_pcm_new(struct snd_soc_pcm_runtime *rtd) -{ - struct snd_soc_platform *platform = rtd->platform; - - if (platform->driver->pcm_new) - return platform->driver->pcm_new(rtd); - else - return 0; -} - -static void snd_soc_platform_drv_pcm_free(struct snd_pcm *pcm) -{ - struct snd_soc_pcm_runtime *rtd = pcm->private_data; - struct snd_soc_platform *platform = rtd->platform; - - if (platform->driver->pcm_free) - platform->driver->pcm_free(pcm); -} - /** * snd_soc_add_platform - Add a platform to the ASoC core * @dev: The parent device for the platform @@ -3370,10 +3349,6 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform, platform->component.probe = snd_soc_platform_drv_probe; if (platform_drv->remove) platform->component.remove = snd_soc_platform_drv_remove; - if (platform_drv->pcm_new) - platform->component.pcm_new = snd_soc_platform_drv_pcm_new; - if (platform_drv->pcm_free) - platform->component.pcm_free = snd_soc_platform_drv_pcm_free; #ifdef CONFIG_DEBUG_FS platform->component.debugfs_prefix = "platform"; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index efc5831f205d..8ff7cd3b8c1f 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -181,6 +181,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir, dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n", be->dai_link->name, event, dir); + if ((event == SND_SOC_DAPM_STREAM_STOP) && + (be->dpcm[dir].users >= 1)) + continue; + snd_soc_dapm_stream_event(be, dir, event); } @@ -2628,25 +2632,12 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream) return ret; } -static void soc_pcm_free(struct snd_pcm *pcm) -{ - struct snd_soc_pcm_runtime *rtd = pcm->private_data; - struct snd_soc_component *component; - - list_for_each_entry(component, &rtd->card->component_dev_list, - card_list) { - if (component->pcm_free) - component->pcm_free(pcm); - } -} - /* create a new pcm */ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) { struct snd_soc_platform *platform = rtd->platform; struct snd_soc_dai *codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; - struct snd_soc_component *component; struct snd_pcm *pcm; char new_name[64]; int ret = 0, playback = 0, capture = 0; @@ -2755,18 +2746,17 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num) if (capture) snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops); - list_for_each_entry(component, &rtd->card->component_dev_list, card_list) { - if (component->pcm_new) { - ret = component->pcm_new(rtd); - if (ret < 0) { - dev_err(component->dev, - "ASoC: pcm constructor failed: %d\n", - ret); - return ret; - } + if (platform->driver->pcm_new) { + ret = platform->driver->pcm_new(rtd); + if (ret < 0) { + dev_err(platform->dev, + "ASoC: pcm constructor failed: %d\n", + ret); + return ret; } } - pcm->private_free = soc_pcm_free; + + pcm->private_free = platform->driver->pcm_free; out: 
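Regarding the users check added to dpcm_dapm_stream_event() above: a back end shared by several front ends must ignore a stream-stop event while other users remain, otherwise the first front end to close powers down a stream the others still use. A toy version of that guard (all names invented):

#include <stdio.h>

enum { STREAM_START, STREAM_STOP };

struct backend {
        const char *name;
        int users;
        int powered;
};

static void stream_event(struct backend *be, int event)
{
        if (event == STREAM_STOP && be->users >= 1)
                return;                 /* still in use: skip */
        be->powered = (event == STREAM_START);
}

int main(void)
{
        struct backend be = { "be0", 0, 0 };

        be.users++; stream_event(&be, STREAM_START);
        be.users++;                     /* second front end joins */
        be.users--; stream_event(&be, STREAM_STOP);  /* ignored */
        printf("powered=%d (still on)\n", be.powered);
        be.users--; stream_event(&be, STREAM_STOP);  /* real stop */
        printf("powered=%d\n", be.powered);
        return 0;
}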
dev_info(rtd->card->dev, "%s <-> %s mapping ok\n", (rtd->num_codecs > 1) ? "multicodec" : rtd->codec_dai->name, diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c index b50f68a439ce..ba9fc099cf67 100644 --- a/sound/soc/ux500/mop500.c +++ b/sound/soc/ux500/mop500.c @@ -33,6 +33,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = { .stream_name = "ab8500_0", .cpu_dai_name = "ux500-msp-i2s.1", .codec_dai_name = "ab8500-codec-dai.0", + .platform_name = "ux500-msp-i2s.1", .codec_name = "ab8500-codec.0", .init = mop500_ab8500_machine_init, .ops = mop500_ab8500_ops, @@ -42,6 +43,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = { .stream_name = "ab8500_1", .cpu_dai_name = "ux500-msp-i2s.3", .codec_dai_name = "ab8500-codec-dai.1", + .platform_name = "ux500-msp-i2s.3", .codec_name = "ab8500-codec.0", .init = NULL, .ops = mop500_ab8500_ops, @@ -85,6 +87,8 @@ static int mop500_of_probe(struct platform_device *pdev, for (i = 0; i < 2; i++) { mop500_dai_links[i].cpu_of_node = msp_np[i]; mop500_dai_links[i].cpu_dai_name = NULL; + mop500_dai_links[i].platform_of_node = msp_np[i]; + mop500_dai_links[i].platform_name = NULL; mop500_dai_links[i].codec_of_node = codec_np; mop500_dai_links[i].codec_name = NULL; } diff --git a/sound/soc/zte/zx-i2s.c b/sound/soc/zte/zx-i2s.c index a865f37c2a56..580902fd9030 100644 --- a/sound/soc/zte/zx-i2s.c +++ b/sound/soc/zte/zx-i2s.c @@ -203,13 +203,15 @@ static int zx_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: - i2s->master = 1; - val |= ZX_I2S_TIMING_MAST; - break; - case SND_SOC_DAIFMT_CBS_CFS: + /* Codec is master, and I2S is slave. */ i2s->master = 0; val |= ZX_I2S_TIMING_SLAVE; break; + case SND_SOC_DAIFMT_CBS_CFS: + /* Codec is slave, and I2S is master. 
*/ + i2s->master = 1; + val |= ZX_I2S_TIMING_MAST; + break; default: dev_err(cpu_dai->dev, "Unknown master/slave format\n"); return -EINVAL; diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 082736c539bc..e630813c5008 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -542,6 +542,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag, if (size < sizeof(scale)) return -ENOMEM; + if (cval->min_mute) + scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE; scale[2] = cval->dBmin; scale[3] = cval->dBmax; if (copy_to_user(_tlv, scale, sizeof(scale))) diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 3417ef347e40..2b4b067646ab 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -64,6 +64,7 @@ struct usb_mixer_elem_info { int cached; int cache_val[MAX_CHANNELS]; u8 initialized; + u8 min_mute; void *private_data; }; diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 4fa0053a40af..7fbc90f5c6de 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -1878,6 +1878,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, if (unitid == 7 && cval->control == UAC_FU_VOLUME) snd_dragonfly_quirk_db_scale(mixer, cval, kctl); break; + /* lowest playback value is muted on C-Media devices */ + case USB_ID(0x0d8c, 0x000c): + case USB_ID(0x0d8c, 0x0014): + if (strstr(kctl->id.name, "Playback")) + cval->min_mute = 1; + break; } } diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index d7b0b0a3a2db..5d2a63248b1d 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1142,6 +1142,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */ + case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */ case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */ @@ -1308,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) mdelay(20); - /* Zoom R16/24 needs a tiny delay here, otherwise requests like - * get/set frequency return as failed despite actually succeeding. + /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here, + * otherwise requests like get/set frequency return as failed despite + * actually succeeding. 
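The mixer quirk hunks above key device-specific behaviour off USB_ID(), which packs vendor and product IDs into one 32-bit value for cheap table or switch dispatch. A compact sketch of the pattern (needs_min_mute is invented; the USB_ID packing matches the kernel's definition):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define USB_ID(vid, pid) (((uint32_t)(vid) << 16) | (pid))

static int needs_min_mute(uint32_t usb_id, const char *ctl_name)
{
        switch (usb_id) {
        case USB_ID(0x0d8c, 0x000c):    /* C-Media */
        case USB_ID(0x0d8c, 0x0014):
                /* lowest playback volume actually mutes */
                return strstr(ctl_name, "Playback") != NULL;
        default:
                return 0;
        }
}

int main(void)
{
        printf("%d\n", needs_min_mute(USB_ID(0x0d8c, 0x000c),
                                      "PCM Playback Volume"));  /* 1 */
        printf("%d\n", needs_min_mute(USB_ID(0x046d, 0x0a46),
                                      "PCM Playback Volume"));  /* 0 */
        return 0;
}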
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index b11d3920b9a5..48d846da14c1 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1665,6 +1665,11 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
 static void hdmi_lpe_audio_free(struct snd_card *card)
 {
 	struct snd_intelhad *ctx = card->private_data;
+	struct intel_hdmi_lpe_audio_pdata *pdata = ctx->dev->platform_data;
+
+	spin_lock_irq(&pdata->lpe_audio_slock);
+	pdata->notify_audio_lpe = NULL;
+	spin_unlock_irq(&pdata->lpe_audio_slock);
 
 	cancel_work_sync(&ctx->hdmi_audio_wq);
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d02d21..d30214221920 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
 #include <linux/utsname.h>
 #include <linux/compiler.h>
 
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 63UL
 
 #define asmlinkage
 #define __visible
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index a4d3762cd825..83874b0e266c 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -704,7 +704,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
 			ui_browser__gotorc(browser, row, column + 1);
 			SLsmg_draw_hline(2);
 
-			if (row++ == 0)
+			if (++row == 0)
 				goto out;
 		} else
 			row = 0;
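The one-character browser.c fix above hinges on evaluation order: post-increment tests the value from before the increment, pre-increment tests the value after it, so the two forms take the branch on different iterations. A tiny self-contained illustration (plain userspace C, nothing from perf):

#include <stdio.h>

int main(void)
{
	unsigned int row = 0;

	/* Post-increment: the *old* value is compared, so the branch is
	 * taken immediately when row starts at 0.
	 */
	if (row++ == 0)
		printf("post: branch taken on old value 0 (row is now %u)\n", row);

	row = 0;
	/* Pre-increment: row is bumped first, then compared, so the
	 * branch is not taken here.
	 */
	if (++row == 0)
		printf("pre: unreachable for a fresh row of 0\n");
	else
		printf("pre: row advanced to %u before the test\n", row);

	return 0;
}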
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 7cf7f7aca4d2..a97710162e07 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -64,6 +64,25 @@ enum intel_pt_pkt_state {
 	INTEL_PT_STATE_FUP_NO_TIP,
 };
 
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+	switch (pkt_state) {
+	case INTEL_PT_STATE_NO_PSB:
+	case INTEL_PT_STATE_NO_IP:
+	case INTEL_PT_STATE_ERR_RESYNC:
+	case INTEL_PT_STATE_IN_SYNC:
+	case INTEL_PT_STATE_TNT:
+		return true;
+	case INTEL_PT_STATE_TIP:
+	case INTEL_PT_STATE_TIP_PGD:
+	case INTEL_PT_STATE_FUP:
+	case INTEL_PT_STATE_FUP_NO_TIP:
+		return false;
+	default:
+		return true;
+	}
+}
+
 #ifdef INTEL_PT_STRICT
 #define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
 #define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
@@ -92,6 +111,7 @@ struct intel_pt_decoder {
 	bool have_tma;
 	bool have_cyc;
 	bool fixup_last_mtc;
+	bool have_last_ip;
 	uint64_t pos;
 	uint64_t last_ip;
 	uint64_t ip;
@@ -99,6 +119,7 @@ struct intel_pt_decoder {
 	uint64_t timestamp;
 	uint64_t tsc_timestamp;
 	uint64_t ref_timestamp;
+	uint64_t sample_timestamp;
 	uint64_t ret_addr;
 	uint64_t ctc_timestamp;
 	uint64_t ctc_delta;
@@ -139,6 +160,7 @@ struct intel_pt_decoder {
 	unsigned int fup_tx_flags;
 	unsigned int tx_flags;
 	uint64_t timestamp_insn_cnt;
+	uint64_t sample_insn_cnt;
 	uint64_t stuck_ip;
 	int no_progress;
 	int stuck_ip_prd;
@@ -398,6 +420,7 @@ static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
 {
 	decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+	decoder->have_last_ip = true;
 }
 
 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -898,6 +921,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
 
 	decoder->tot_insn_cnt += insn_cnt;
 	decoder->timestamp_insn_cnt += insn_cnt;
+	decoder->sample_insn_cnt += insn_cnt;
 	decoder->period_insn_cnt += insn_cnt;
 
 	if (err) {
@@ -1446,7 +1470,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
 
 		case INTEL_PT_FUP:
 			decoder->pge = true;
-			intel_pt_set_last_ip(decoder);
+			if (decoder->packet.count)
+				intel_pt_set_last_ip(decoder);
 			break;
 
 		case INTEL_PT_MODE_TSX:
@@ -1650,6 +1675,8 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
 			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psbend(decoder);
 			if (err == -EAGAIN)
@@ -1730,8 +1757,9 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
 
 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
 {
-	return decoder->last_ip || decoder->packet.count == 0 ||
-	       decoder->packet.count == 3 || decoder->packet.count == 6;
+	return decoder->packet.count &&
+	       (decoder->have_last_ip || decoder->packet.count == 3 ||
+		decoder->packet.count == 6);
 }
 
 /* Walk PSB+ packets to get in sync. */
@@ -1854,14 +1882,10 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_FUP:
-			if (decoder->overflow) {
-				if (intel_pt_have_ip(decoder))
-					intel_pt_set_ip(decoder);
-				if (decoder->ip)
-					return 0;
-			}
-			if (decoder->packet.count)
-				intel_pt_set_last_ip(decoder);
+			if (intel_pt_have_ip(decoder))
+				intel_pt_set_ip(decoder);
+			if (decoder->ip)
+				return 0;
 			break;
 
 		case INTEL_PT_MTC:
@@ -1910,6 +1934,9 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
 			break;
 
 		case INTEL_PT_PSB:
+			decoder->last_ip = 0;
+			decoder->have_last_ip = true;
+			intel_pt_clear_stack(&decoder->stack);
 			err = intel_pt_walk_psb(decoder);
 			if (err)
 				return err;
@@ -1935,6 +1962,8 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
 {
 	int err;
 
+	decoder->set_fup_tx_flags = false;
+
 	intel_pt_log("Scanning for full IP\n");
 	err = intel_pt_walk_to_ip(decoder);
 	if (err)
@@ -2043,6 +2072,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 
 	decoder->pge = false;
 	decoder->continuous_period = false;
+	decoder->have_last_ip = false;
 	decoder->last_ip = 0;
 	decoder->ip = 0;
 	intel_pt_clear_stack(&decoder->stack);
@@ -2051,6 +2081,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 	if (err)
 		return err;
 
+	decoder->have_last_ip = true;
 	decoder->pkt_state = INTEL_PT_STATE_NO_IP;
 
 	err = intel_pt_walk_psb(decoder);
@@ -2069,7 +2100,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
 
 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
 {
-	uint64_t est = decoder->timestamp_insn_cnt << 1;
+	uint64_t est = decoder->sample_insn_cnt << 1;
 
 	if (!decoder->cbr || !decoder->max_non_turbo_ratio)
 		goto out;
@@ -2077,7 +2108,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
 	est *= decoder->max_non_turbo_ratio;
 	est /= decoder->cbr;
 out:
-	return decoder->timestamp + est;
+	return decoder->sample_timestamp + est;
 }
 
 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2093,7 +2124,9 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 			err = intel_pt_sync(decoder);
 			break;
 		case INTEL_PT_STATE_NO_IP:
+			decoder->have_last_ip = false;
 			decoder->last_ip = 0;
+			decoder->ip = 0;
 			/* Fall through */
 		case INTEL_PT_STATE_ERR_RESYNC:
 			err = intel_pt_sync_ip(decoder);
@@ -2130,15 +2163,24 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
 		}
 	} while (err == -ENOLINK);
 
-	decoder->state.err = err ? intel_pt_ext_err(err) : 0;
-	decoder->state.timestamp = decoder->timestamp;
+	if (err) {
+		decoder->state.err = intel_pt_ext_err(err);
+		decoder->state.from_ip = decoder->ip;
+		decoder->sample_timestamp = decoder->timestamp;
+		decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+	} else {
+		decoder->state.err = 0;
+		if (intel_pt_sample_time(decoder->pkt_state)) {
+			decoder->sample_timestamp = decoder->timestamp;
+			decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+		}
+	}
+
+	decoder->state.timestamp = decoder->sample_timestamp;
 	decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
 	decoder->state.cr3 = decoder->cr3;
 	decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
 
-	if (err)
-		decoder->state.from_ip = decoder->ip;
-
 	return &decoder->state;
 }
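The intel_pt_have_ip() rework above is the heart of this decoder fix: Intel PT compresses instruction pointers, and the packet's IP-bytes count says how much of the address is actually present. The old test treated last_ip == 0 as "no IP known", which is wrong when 0 is a legitimately reconstructed value; the new code tracks knowledge explicitly in have_last_ip. A paraphrase of the new rule as a free-standing predicate (the names here are mine, not the decoder's):

#include <stdbool.h>

/* Paraphrase of the reworked intel_pt_have_ip(): a count of 0 means
 * the packet carries no IP payload at all; counts 3 and 6 are the
 * self-contained encodings (sign-extended and full 64-bit); every
 * other count only updates the low bytes, so a previously seen IP is
 * required to reconstruct the full address.
 */
static bool have_usable_ip(unsigned int ip_bytes_count, bool have_last_ip)
{
	if (ip_bytes_count == 0)
		return false;
	return have_last_ip || ip_bytes_count == 3 || ip_bytes_count == 6;
}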
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index c2187178fb13..548b6d4ee1be 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1943,7 +1943,7 @@ static __init int nfit_test_init(void)
 		nfit_test->setup = nfit_test0_setup;
 		break;
 	case 1:
-		nfit_test->num_pm = 1;
+		nfit_test->num_pm = 2;
 		nfit_test->dcr_idx = NUM_DCR;
 		nfit_test->num_dcr = 2;
 		nfit_test->alloc = nfit_test1_alloc;
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a958aaf..763f37fecfb8 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
 
 	if (chdir(cwd) != 0)
 		err(1, "chdir to private tmpfs");
-
-	if (umount2(".", MNT_DETACH) != 0)
-		err(1, "detach private tmpfs");
 }
 
 static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
 			err(1, "chown");
 		if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
 			err(1, "chmod");
-}
+	}
 
 	capng_get_caps_process();
 
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
 	} else {
 		printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
 		exec_other_validate_cap("./validate_cap_sgidnonroot",
-						false, false, true, false);
+					false, false, true, false);
 
 	if (fork_wait()) {
 		printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
index 13f5198ba0ee..b3c48fc6ea4b 100755
--- a/tools/testing/selftests/ntb/ntb_test.sh
+++ b/tools/testing/selftests/ntb/ntb_test.sh
@@ -326,6 +326,10 @@ function ntb_tool_tests()
 	link_test $LOCAL_TOOL $REMOTE_TOOL
 	link_test $REMOTE_TOOL $LOCAL_TOOL
 
+	# Ensure the link is up on both sides before continuing
+	write_file Y $LOCAL_TOOL/link_event
+	write_file Y $REMOTE_TOOL/link_event
+
 	for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
 		PT=$(basename $PEER_TRANS)
 		write_file $MW_SIZE $LOCAL_TOOL/$PT
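Regarding the umount2() call dropped from test_execve.c above: MNT_DETACH requests a lazy unmount, meaning the mount point vanishes from the namespace immediately while the filesystem stays pinned until its last user (an open file or a process cwd) goes away. The diff itself does not state the rationale for the removal, so the snippet below is only a reference for the call's semantics; the path and wrapper name are hypothetical.

#include <err.h>
#include <sys/mount.h>

/* Hypothetical helper, not part of the selftest: perform a lazy
 * unmount.  The mount disappears from the namespace at once, but
 * processes still holding references to it keep working until those
 * references are dropped.
 */
static void lazy_unmount(const char *target)
{
	if (umount2(target, MNT_DETACH) != 0)
		err(1, "umount2(%s, MNT_DETACH)", target);
}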
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 3417e184c8e1..a43012587fcc 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1115,9 +1115,6 @@ static void cpu_init_hyp_mode(void *dummy)
 	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
 	__cpu_init_stage2();
 
-	if (is_kernel_in_hyp_mode())
-		kvm_timer_init_vhe();
-
 	kvm_arm_init_debug();
 }
 
@@ -1137,6 +1134,7 @@ static void cpu_hyp_reinit(void)
 		 * event was cancelled before the CPU was reset.
 		 */
 		__cpu_init_stage2();
+		kvm_timer_init_vhe();
 	} else {
 		cpu_init_hyp_mode(NULL);
 	}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index e2e5effba2a9..db1c7b25a44c 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1665,12 +1665,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_age_hva(start, end);
 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
 }
 
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
+	if (!kvm->arch.pgd)
+		return 0;
 	trace_kvm_test_age_hva(hva);
 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
 }
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 37d9118fd84b..d99850c462a1 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -51,6 +51,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
 	return vfio_group;
 }
 
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+					       struct file *filep)
+{
+	bool ret, (*fn)(struct vfio_group *, struct file *);
+
+	fn = symbol_get(vfio_external_group_match_file);
+	if (!fn)
+		return false;
+
+	ret = fn(group, filep);
+
+	symbol_put(vfio_external_group_match_file);
+
+	return ret;
+}
+
 static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
 {
 	void (*fn)(struct vfio_group *);
@@ -231,37 +247,31 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
 		if (!f.file)
 			return -EBADF;
 
-		vfio_group = kvm_vfio_group_get_external_user(f.file);
-		fdput(f);
-
-		if (IS_ERR(vfio_group))
-			return PTR_ERR(vfio_group);
-
 		ret = -ENOENT;
 
 		mutex_lock(&kv->lock);
 
 		list_for_each_entry(kvg, &kv->group_list, node) {
-			if (kvg->vfio_group != vfio_group)
+			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+								f.file))
 				continue;
 
 			list_del(&kvg->node);
+			kvm_arch_end_assignment(dev->kvm);
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+			kvm_spapr_tce_release_vfio_group(dev->kvm,
+							 kvg->vfio_group);
+#endif
+			kvm_vfio_group_set_kvm(kvg->vfio_group, NULL);
 			kvm_vfio_group_put_external_user(kvg->vfio_group);
 			kfree(kvg);
 			ret = 0;
 			break;
 		}
 
-		kvm_arch_end_assignment(dev->kvm);
-
 		mutex_unlock(&kv->lock);
 
-#ifdef CONFIG_SPAPR_TCE_IOMMU
-		kvm_spapr_tce_release_vfio_group(dev->kvm, vfio_group);
-#endif
-		kvm_vfio_group_set_kvm(vfio_group, NULL);
-
-		kvm_vfio_group_put_external_user(vfio_group);
+		fdput(f);
 
 		kvm_vfio_update_coherency(dev);
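The kvm_vfio_external_group_match_file() wrapper added above follows a standard kernel pattern for calling into an optional module: symbol_get() resolves the export only if the providing module is loaded (and pins it while in use), and symbol_put() releases the reference. A stripped-down sketch of the shape, with a made-up symbol name to make the assumption explicit:

#include <linux/module.h>

/* Assumed for illustration only: some_optional_fn() is exported by a
 * module that may or may not be loaded at runtime.
 */
extern bool some_optional_fn(int arg);

static bool call_if_loaded(int arg)
{
	bool ret;
	bool (*fn)(int);

	fn = symbol_get(some_optional_fn);	/* NULL if module absent */
	if (!fn)
		return false;

	ret = fn(arg);				/* module is pinned here */
	symbol_put(some_optional_fn);		/* balance the get */
	return ret;
}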