diff --git a/Makefile b/Makefile index db2cd3f1..5cc8641 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 13 -EXTRAVERSION = .26 +EXTRAVERSION = .27 NAME = Remoralised Urchins Update # *DOCUMENTATION* diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index af4e8c8..6582c4a 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d) return trace->nr_entries >= trace->max_entries; } -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +/* This must be noinline to so that our skip calculation works correctly */ +static noinline void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) { struct stack_trace_data data; struct stackframe frame; data.trace = trace; data.skip = trace->skip; + data.no_sched_functions = nosched; if (tsk != current) { #ifdef CONFIG_SMP @@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; return; #else - data.no_sched_functions = 1; frame.fp = thread_saved_fp(tsk); frame.sp = thread_saved_sp(tsk); frame.lr = 0; /* recovered from the stack */ @@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) } else { register unsigned long current_sp asm ("sp"); - data.no_sched_functions = 0; + /* We don't want this function nor the caller */ + data.skip += 2; frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; frame.lr = (unsigned long)__builtin_return_address(0); - frame.pc = (unsigned long)save_stack_trace_tsk; + frame.pc = (unsigned long)__save_stack_trace; } walk_stackframe(&frame, save_trace, &data); @@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} + void save_stack_trace(struct stack_trace *trace) { - save_stack_trace_tsk(current, trace); + __save_stack_trace(current, trace, 0); } EXPORT_SYMBOL_GPL(save_stack_trace); #endif diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 14a9d5a..9357d33 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -27,7 +27,6 @@ generic-y += mutex.h generic-y += pci.h generic-y += percpu.h generic-y += poll.h -generic-y += posix_types.h generic-y += resource.h generic-y += scatterlist.h generic-y += sections.h diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 9947768..3699d1d 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -23,8 +23,6 @@ #include <asm-generic/dma-coherent.h> -#define ARCH_HAS_DMA_GET_REQUIRED_MASK - extern struct dma_map_ops *dma_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h new file mode 100644 index 0000000..7985ff6 --- /dev/null +++ b/arch/arm64/include/uapi/asm/posix_types.h @@ -0,0 +1,10 @@ +#ifndef __ASM_POSIX_TYPES_H +#define __ASM_POSIX_TYPES_H + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +#define __kernel_old_uid_t __kernel_old_uid_t + +#include <asm-generic/posix_types.h> + +#endif /* 
__ASM_POSIX_TYPES_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9c94f40..9e3d509 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -274,7 +274,6 @@ el1_sp_pc: * Stack or PC alignment exception handling */ mrs x0, far_el1 - mov x1, x25 mov x2, sp b do_sp_pc_abort el1_undef: diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 5341534..8553668 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -872,6 +872,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, compat_ulong_t val) { int ret; + mm_segment_t old_fs = get_fs(); if (off & 3 || off >= COMPAT_USER_SZ) return -EIO; @@ -879,10 +880,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, if (off >= sizeof(compat_elf_gregset_t)) return 0; + set_fs(KERNEL_DS); ret = copy_regset_from_user(tsk, &user_aarch32_view, REGSET_COMPAT_GPR, off, sizeof(compat_ulong_t), &val); + set_fs(old_fs); + return ret; } diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 14ac52c..884de34 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -131,7 +131,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma board_bind_eic_interrupt = &msc_bind_eic_interrupt; - for (; nirq >= 0; nirq--, imp++) { + for (; nirq > 0; nirq--, imp++) { int n = imp->im_irq; switch (imp->im_type) { diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts index 68c1731..de53a1b 100644 --- a/arch/mips/lantiq/dts/easy50712.dts +++ b/arch/mips/lantiq/dts/easy50712.dts @@ -8,6 +8,7 @@ }; memory@0 { + device_type = "memory"; reg = <0x0 0x2000000>; }; diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 97909d3b..4ca7fca 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -189,7 +189,7 @@ SYSCALL_SPU(getcwd) SYSCALL_SPU(capget) SYSCALL_SPU(capset) COMPAT_SYS(sigaltstack) -SYSX_SPU(sys_sendfile,compat_sys_sendfile_wrapper,sys_sendfile) +SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile) SYSCALL(ni_syscall) SYSCALL(ni_syscall) PPC_SYS(vfork) diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 0733b05..23af574 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -48,6 +48,9 @@ static struct __initdata of_device_id legacy_serial_parents[] = { static unsigned int legacy_serial_count; static int legacy_serial_console = -1; +static const upf_t legacy_port_flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | + UPF_SHARE_IRQ | UPF_FIXED_PORT; + static unsigned int tsi_serial_in(struct uart_port *p, int offset) { unsigned int tmp; @@ -153,8 +156,6 @@ static int __init add_legacy_soc_port(struct device_node *np, { u64 addr; const u32 *addrp; - upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ - | UPF_FIXED_PORT; struct device_node *tsi = of_get_parent(np); /* We only support ports that have a clock frequency properly @@ -185,9 +186,11 @@ static int __init add_legacy_soc_port(struct device_node *np, * IO port value. 
It will be fixed up later along with the irq */ if (tsi && !strcmp(tsi->type, "tsi-bridge")) - return add_legacy_port(np, -1, UPIO_TSI, addr, addr, NO_IRQ, flags, 0); + return add_legacy_port(np, -1, UPIO_TSI, addr, addr, + NO_IRQ, legacy_port_flags, 0); else - return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags, 0); + return add_legacy_port(np, -1, UPIO_MEM, addr, addr, + NO_IRQ, legacy_port_flags, 0); } static int __init add_legacy_isa_port(struct device_node *np, @@ -228,7 +231,7 @@ static int __init add_legacy_isa_port(struct device_node *np, /* Add port, irq will be dealt with later */ return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr, - NO_IRQ, UPF_BOOT_AUTOCONF, 0); + NO_IRQ, legacy_port_flags, 0); } @@ -301,7 +304,7 @@ static int __init add_legacy_pci_port(struct device_node *np, * IO port value. It will be fixed up later along with the irq */ return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, - UPF_BOOT_AUTOCONF, np != pci_dev); + legacy_port_flags, np != pci_dev); } #endif diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index bdc499c..069f857 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -455,9 +455,17 @@ void __init smp_setup_cpu_maps(void) } for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { + bool avail; + DBG(" thread %d -> cpu %d (hard id %d)\n", j, cpu, intserv[j]); - set_cpu_present(cpu, true); + + avail = of_device_is_available(dn); + if (!avail) + avail = !of_property_match_string(dn, + "enable-method", "spin-table"); + + set_cpu_present(cpu, avail); set_hard_smp_processor_id(cpu, intserv[j]); set_cpu_possible(cpu, true); cpu++; diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index b456b15..68f97d5 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -400,6 +400,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state) } else { result = EEH_STATE_NOT_SUPPORT; } + break; default: result = EEH_STATE_NOT_SUPPORT; } diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index bbf8141..2bed4f0 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -142,9 +142,9 @@ struct _lowcore { __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */ /* Interrupt response block */ - __u8 irb[64]; /* 0x0300 */ + __u8 irb[96]; /* 0x0300 */ - __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */ + __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */ /* * 0xe00 contains the address of the IPL Parameter Information @@ -288,12 +288,13 @@ struct _lowcore { __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */ /* Interrupt response block. */ - __u8 irb[64]; /* 0x0400 */ + __u8 irb[96]; /* 0x0400 */ + __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */ /* Per cpu primary space access list */ - __u32 paste[16]; /* 0x0440 */ + __u32 paste[16]; /* 0x0480 */ - __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */ + __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */ /* * 0xe00 contains the address of the IPL Parameter Information diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 1d7fdbe..60d03c2 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -554,11 +554,6 @@ ENTRY(iret_exc) CFI_RESTORE_STATE ldt_ss: - larl PT_OLDSS(%esp), %eax - jnz restore_nocheck - testl $0x00400000, %eax # returning to 32bit stack? 
- jnz restore_nocheck # allright, normal return - #ifdef CONFIG_PARAVIRT /* * The kernel can't run on a non-flat stack if paravirt mode diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index dc97328..0a42b1e 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl @@ -212,10 +212,10 @@ 203 common sched_setaffinity sys_sched_setaffinity 204 common sched_getaffinity sys_sched_getaffinity 205 64 set_thread_area -206 common io_setup sys_io_setup +206 64 io_setup sys_io_setup 207 common io_destroy sys_io_destroy 208 common io_getevents sys_io_getevents -209 common io_submit sys_io_submit +209 64 io_submit sys_io_submit 210 common io_cancel sys_io_cancel 211 64 get_thread_area 212 common lookup_dcookie sys_lookup_dcookie @@ -356,3 +356,5 @@ 540 x32 process_vm_writev compat_sys_process_vm_writev 541 x32 setsockopt compat_sys_setsockopt 542 x32 getsockopt compat_sys_getsockopt +543 x32 io_setup compat_sys_io_setup +544 x32 io_submit compat_sys_io_submit diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index de22f48..5e9ad05 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -57,6 +57,12 @@ EXPORT_SYMBOL(acpi_root_dir); #ifdef CONFIG_X86 +#ifdef CONFIG_ACPI_CUSTOM_DSDT +static inline int set_copy_dsdt(const struct dmi_system_id *id) +{ + return 0; +} +#else static int set_copy_dsdt(const struct dmi_system_id *id) { printk(KERN_NOTICE "%s detected - " @@ -64,6 +70,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id) acpi_gbl_copy_dsdt_locally = 1; return 0; } +#endif static struct dmi_system_id dsdt_dmi_table[] __initdata = { /* diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index abcae69..c81409a 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -172,6 +172,14 @@ static struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), }, }, + { + .callback = video_detect_force_vendor, + .ident = "Dell Inspiron 5737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), + }, + }, { }, }; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index f0faa0d..a08cb7a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -440,10 +440,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(0x1b4b, 0x9192), .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ + { PCI_DEVICE(0x1b4b, 0x91a0), + .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(0x1b4b, 0x9230), .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + .driver_data = board_ahci_yes_fbs }, /* Promise */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 50b2831..7461649 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -728,11 +728,9 @@ int of_init_opp_table(struct device *dev) unsigned long freq = be32_to_cpup(val++) * 1000; unsigned long volt = be32_to_cpup(val++); - if (opp_add(dev, freq, volt)) { + if (opp_add(dev, freq, volt)) dev_warn(dev, "%s: Failed to add OPP %ld\n", __func__, freq); - continue; - } nr -= 2; } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index ca69cf9..d2a3f3d 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -1419,6 +1419,37 @@ static inline void 
ata_swap_string(u16 *buf, unsigned int len) be16_to_cpus(&buf[i]); } +static void mtip_set_timeout(struct driver_data *dd, + struct host_to_dev_fis *fis, + unsigned int *timeout, u8 erasemode) +{ + switch (fis->command) { + case ATA_CMD_DOWNLOAD_MICRO: + *timeout = 120000; /* 2 minutes */ + break; + case ATA_CMD_SEC_ERASE_UNIT: + case 0xFC: + if (erasemode) + *timeout = ((*(dd->port->identify + 90) * 2) * 60000); + else + *timeout = ((*(dd->port->identify + 89) * 2) * 60000); + break; + case ATA_CMD_STANDBYNOW1: + *timeout = 120000; /* 2 minutes */ + break; + case 0xF7: + case 0xFA: + *timeout = 60000; /* 60 seconds */ + break; + case ATA_CMD_SMART: + *timeout = 15000; /* 15 seconds */ + break; + default: + *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; + break; + } +} + /* * Request the device identity information. * @@ -1520,6 +1551,7 @@ static int mtip_standby_immediate(struct mtip_port *port) int rv; struct host_to_dev_fis fis; unsigned long start; + unsigned int timeout; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -1527,6 +1559,8 @@ static int mtip_standby_immediate(struct mtip_port *port) fis.opts = 1 << 7; fis.command = ATA_CMD_STANDBYNOW1; + mtip_set_timeout(port->dd, &fis, &timeout, 0); + start = jiffies; rv = mtip_exec_internal_command(port, &fis, @@ -1535,7 +1569,7 @@ static int mtip_standby_immediate(struct mtip_port *port) 0, 0, GFP_ATOMIC, - 15000); + timeout); dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n", jiffies_to_msecs(jiffies - start)); if (rv) @@ -2037,36 +2071,6 @@ static unsigned int implicit_sector(unsigned char command, } return rv; } -static void mtip_set_timeout(struct driver_data *dd, - struct host_to_dev_fis *fis, - unsigned int *timeout, u8 erasemode) -{ - switch (fis->command) { - case ATA_CMD_DOWNLOAD_MICRO: - *timeout = 120000; /* 2 minutes */ - break; - case ATA_CMD_SEC_ERASE_UNIT: - case 0xFC: - if (erasemode) - *timeout = ((*(dd->port->identify + 90) * 2) * 60000); - else - *timeout = ((*(dd->port->identify + 89) * 2) * 60000); - break; - case ATA_CMD_STANDBYNOW1: - *timeout = 120000; /* 2 minutes */ - break; - case 0xF7: - case 0xFA: - *timeout = 60000; /* 60 seconds */ - break; - case ATA_CMD_SMART: - *timeout = 15000; /* 15 seconds */ - break; - default: - *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; - break; - } -} /* * Executes a taskfile @@ -3964,6 +3968,57 @@ static int mtip_block_resume(struct driver_data *dd) return 0; } +static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) +{ + int pos; + unsigned short pcie_dev_ctrl; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(pdev, + pos + PCI_EXP_DEVCTL, + &pcie_dev_ctrl); + if (pcie_dev_ctrl & (1 << 11) || + pcie_dev_ctrl & (1 << 4)) { + dev_info(&dd->pdev->dev, + "Disabling ERO/No-Snoop on bridge device %04x:%04x\n", + pdev->vendor, pdev->device); + pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, + pos + PCI_EXP_DEVCTL, + pcie_dev_ctrl); + } + } +} + +static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) +{ + /* + * This workaround is specific to AMD/ATI chipset with a PCI upstream + * device with device id 0x5aXX + */ + if (pdev->bus && pdev->bus->self) { + if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI && + ((pdev->bus->self->device & 0xff00) == 0x5a00)) { + mtip_disable_link_opts(dd, pdev->bus->self); + } else { + /* Check further up the topology */ + struct pci_dev *parent_dev = pdev->bus->self; + if 
(parent_dev->bus && + parent_dev->bus->parent && + parent_dev->bus->parent->self && + parent_dev->bus->parent->self->vendor == + PCI_VENDOR_ID_ATI && + (parent_dev->bus->parent->self->device & + 0xff00) == 0x5a00) { + mtip_disable_link_opts(dd, + parent_dev->bus->parent->self); + } + } + } +} + /* * Called for each supported PCI device detected. * @@ -4025,11 +4080,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, goto block_initialize_err; } + /* Copy the info we may need later into the private data structure. */ dd->major = mtip_major; dd->instance = instance; dd->pdev = pdev; + mtip_fix_ero_nosnoop(dd, pdev); + /* Initialize the block layer. */ rv = mtip_block_initialize(dd); if (rv < 0) { @@ -4278,13 +4336,13 @@ static int __init mtip_init(void) */ static void __exit mtip_exit(void) { - debugfs_remove_recursive(dfs_parent); - /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); /* Unregister the PCI driver. */ pci_unregister_driver(&mtip_pci_driver); + + debugfs_remove_recursive(dfs_parent); } MODULE_AUTHOR("Micron Technology, Inc"); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index ed0fade..97deda4 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) int hci_uart_tx_wakeup(struct hci_uart *hu) { - struct tty_struct *tty = hu->tty; - struct hci_dev *hdev = hu->hdev; - struct sk_buff *skb; - if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) { set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); return 0; @@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) BT_DBG(""); + schedule_work(&hu->write_work); + + return 0; +} + +static void hci_uart_write_work(struct work_struct *work) +{ + struct hci_uart *hu = container_of(work, struct hci_uart, write_work); + struct tty_struct *tty = hu->tty; + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + + /* REVISIT: should we cope with bad skbs or ->write() returning + * and error value ? + */ + restart: clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); @@ -153,7 +165,6 @@ restart: goto restart; clear_bit(HCI_UART_SENDING, &hu->tx_state); - return 0; } static void hci_uart_init_work(struct work_struct *work) @@ -289,6 +300,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) tty->receive_room = 65536; INIT_WORK(&hu->init_ready, hci_uart_init_work); + INIT_WORK(&hu->write_work, hci_uart_write_work); spin_lock_init(&hu->rx_lock); @@ -326,6 +338,8 @@ static void hci_uart_tty_close(struct tty_struct *tty) if (hdev) hci_uart_close(hdev); + cancel_work_sync(&hu->write_work); + if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index fffa61f..12df101 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -68,6 +68,7 @@ struct hci_uart { unsigned long hdev_flags; struct work_struct init_ready; + struct work_struct write_work; struct hci_uart_proto *proto; void *priv; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b9ff6e2..bbcc441 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -839,14 +839,16 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v5.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; - break; - case 10: - args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP; + break; + case 10: + args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP; + break; + } } args.v5.ucTransmitterID = encoder_id; args.v5.ucEncoderMode = encoder_mode; @@ -861,20 +863,22 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK)) args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_REF_DIV_SRC; - switch (bpc) { - case 8: - default: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; - break; - case 10: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; - break; - case 12: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; - break; - case 16: - args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; - break; + if (encoder_mode == ATOM_ENCODER_MODE_HDMI) { + switch (bpc) { + case 8: + default: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP; + break; + case 10: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP; + break; + case 12: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP; + break; + case 16: + args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP; + break; + } } args.v6.ucTransmitterID = encoder_id; args.v6.ucEncoderMode = encoder_mode; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 1602398..4c05f2b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -384,6 +384,19 @@ static int dp_get_max_dp_pix_clock(int link_rate, /***** radeon specific DP functions *****/ +static int radeon_dp_get_max_link_rate(struct drm_connector *connector, + u8 dpcd[DP_DPCD_SIZE]) +{ + int max_link_rate; + + if (radeon_connector_is_dp12_capable(connector)) + max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000); + else + max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000); + + return max_link_rate; +} + /* First get the min lane# when low rate is used according to pixel clock * (prefer low rate), second check max lane# supported by DP panel, * if the max lane# < low rate lane# then use max lane# instead. 
@@ -393,7 +406,7 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector, int pix_clock) { int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); - int max_link_rate = drm_dp_max_link_rate(dpcd); + int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd); int max_lane_num = drm_dp_max_lane_count(dpcd); int lane_num; int max_dp_pix_clock; @@ -431,7 +444,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector, return 540000; } - return drm_dp_max_link_rate(dpcd); + return radeon_dp_get_max_link_rate(connector, dpcd); } static u8 radeon_dp_encoder_service(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 71619b0..f04f3c9 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1877,8 +1877,11 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; else args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); - } else + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; + } else { args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); + } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5a87c9f..fc604fc 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1345,7 +1345,7 @@ bool radeon_connector_is_dp12_capable(struct drm_connector *connector) struct radeon_device *rdev = dev->dev_private; if (ASIC_IS_DCE5(rdev) && - (rdev->clock.dp_extclk >= 53900) && + (rdev->clock.default_dispclk >= 53900) && radeon_connector_encoder_is_hbr2(connector)) { return true; } diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 14744e0..8ad2505 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -797,7 +797,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid, * ->numbered being checked, which may not always be the case when * drivers go to access report values. */ - report = hid->report_enum[type].report_id_hash[id]; + if (id == 0) { + /* + * Validating on id 0 means we should examine the first + * report in the list. 
+ */ + report = list_entry( + hid->report_enum[type].report_list.next, + struct hid_report, list); + } else { + report = hid->report_enum[type].report_id_hash[id]; + } if (!report) { hid_err(hid, "missing %s %u\n", hid_report_names[type], id); return NULL; diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index a526c0e..aa78185 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -157,12 +157,11 @@ static int at91_adc_channel_init(struct iio_dev *idev) return idev->num_channels; } -static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev, +static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev, struct at91_adc_trigger *triggers, const char *trigger_name) { struct at91_adc_state *st = iio_priv(idev); - u8 value = 0; int i; for (i = 0; i < st->trigger_number; i++) { @@ -175,15 +174,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev, return -ENOMEM; if (strcmp(trigger_name, name) == 0) { - value = triggers[i].value; kfree(name); - break; + if (triggers[i].value == 0) + return -EINVAL; + return triggers[i].value; } kfree(name); } - return value; + return -EINVAL; } static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) @@ -193,14 +193,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state) struct iio_buffer *buffer = idev->buffer; struct at91_adc_reg_desc *reg = st->registers; u32 status = at91_adc_readl(st, reg->trigger_register); - u8 value; + int value; u8 bit; value = at91_adc_get_trigger_value_by_name(idev, st->trigger_list, idev->trig->name); - if (value == 0) - return -EINVAL; + if (value < 0) + return value; if (state) { st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL); diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index 03b25b3..96670f9 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -1211,8 +1211,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1038_channels, + .num_channels = ARRAY_SIZE(max1038_channels), }, [max11605] = { .bits = 8, @@ -1221,8 +1221,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1038_channels, + .num_channels = ARRAY_SIZE(max1038_channels), }, [max11606] = { .bits = 10, @@ -1271,8 +1271,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1138_channels, + .num_channels = ARRAY_SIZE(max1138_channels), }, [max11611] = { .bits = 10, @@ -1281,8 +1281,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { .num_modes = ARRAY_SIZE(max1238_mode_list), .default_mode = s0to11, .info = &max1238_info, - .channels = max1238_channels, - .num_channels = ARRAY_SIZE(max1238_channels), + .channels = max1138_channels, + .num_channels = ARRAY_SIZE(max1138_channels), }, [max11612] = { .bits = 12, diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index f0d588f..1acb991 100644 --- a/drivers/infiniband/core/user_mad.c +++ 
b/drivers/infiniband/core/user_mad.c @@ -98,7 +98,7 @@ struct ib_umad_port { struct ib_umad_device { int start_port, end_port; - struct kref ref; + struct kobject kobj; struct ib_umad_port port[0]; }; @@ -134,14 +134,18 @@ static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); static void ib_umad_add_one(struct ib_device *device); static void ib_umad_remove_one(struct ib_device *device); -static void ib_umad_release_dev(struct kref *ref) +static void ib_umad_release_dev(struct kobject *kobj) { struct ib_umad_device *dev = - container_of(ref, struct ib_umad_device, ref); + container_of(kobj, struct ib_umad_device, kobj); kfree(dev); } +static struct kobj_type ib_umad_dev_ktype = { + .release = ib_umad_release_dev, +}; + static int hdr_size(struct ib_umad_file *file) { return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : @@ -780,27 +784,19 @@ static int ib_umad_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_umad_file *file; - int ret; + int ret = -ENXIO; port = container_of(inode->i_cdev, struct ib_umad_port, cdev); - if (port) - kref_get(&port->umad_dev->ref); - else - return -ENXIO; mutex_lock(&port->file_mutex); - if (!port->ib_dev) { - ret = -ENXIO; + if (!port->ib_dev) goto out; - } + ret = -ENOMEM; file = kzalloc(sizeof *file, GFP_KERNEL); - if (!file) { - kref_put(&port->umad_dev->ref, ib_umad_release_dev); - ret = -ENOMEM; + if (!file) goto out; - } mutex_init(&file->mutex); spin_lock_init(&file->send_lock); @@ -814,6 +810,13 @@ static int ib_umad_open(struct inode *inode, struct file *filp) list_add_tail(&file->port_list, &port->file_list); ret = nonseekable_open(inode, filp); + if (ret) { + list_del(&file->port_list); + kfree(file); + goto out; + } + + kobject_get(&port->umad_dev->kobj); out: mutex_unlock(&port->file_mutex); @@ -852,7 +855,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp) mutex_unlock(&file->port->file_mutex); kfree(file); - kref_put(&dev->ref, ib_umad_release_dev); + kobject_put(&dev->kobj); return 0; } @@ -880,10 +883,6 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) int ret; port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); - if (port) - kref_get(&port->umad_dev->ref); - else - return -ENXIO; if (filp->f_flags & O_NONBLOCK) { if (down_trylock(&port->sm_sem)) { @@ -898,17 +897,27 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) } ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props); - if (ret) { - up(&port->sm_sem); - goto fail; - } + if (ret) + goto err_up_sem; filp->private_data = port; - return nonseekable_open(inode, filp); + ret = nonseekable_open(inode, filp); + if (ret) + goto err_clr_sm_cap; + + kobject_get(&port->umad_dev->kobj); + + return 0; + +err_clr_sm_cap: + swap(props.set_port_cap_mask, props.clr_port_cap_mask); + ib_modify_port(port->ib_dev, port->port_num, 0, &props); + +err_up_sem: + up(&port->sm_sem); fail: - kref_put(&port->umad_dev->ref, ib_umad_release_dev); return ret; } @@ -927,7 +936,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp) up(&port->sm_sem); - kref_put(&port->umad_dev->ref, ib_umad_release_dev); + kobject_put(&port->umad_dev->kobj); return ret; } @@ -995,6 +1004,7 @@ static int find_overflow_devnum(void) } static int ib_umad_init_port(struct ib_device *device, int port_num, + struct ib_umad_device *umad_dev, struct ib_umad_port *port) { int devnum; @@ -1027,6 +1037,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, cdev_init(&port->cdev, &umad_fops); 
port->cdev.owner = THIS_MODULE; + port->cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); if (cdev_add(&port->cdev, base, 1)) goto err_cdev; @@ -1045,6 +1056,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, base += IB_UMAD_MAX_PORTS; cdev_init(&port->sm_cdev, &umad_sm_fops); port->sm_cdev.owner = THIS_MODULE; + port->sm_cdev.kobj.parent = &umad_dev->kobj; kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; @@ -1138,7 +1150,7 @@ static void ib_umad_add_one(struct ib_device *device) if (!umad_dev) return; - kref_init(&umad_dev->ref); + kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); umad_dev->start_port = s; umad_dev->end_port = e; @@ -1146,7 +1158,8 @@ static void ib_umad_add_one(struct ib_device *device) for (i = s; i <= e; ++i) { umad_dev->port[i - s].umad_dev = umad_dev; - if (ib_umad_init_port(device, i, &umad_dev->port[i - s])) + if (ib_umad_init_port(device, i, umad_dev, + &umad_dev->port[i - s])) goto err; } @@ -1158,7 +1171,7 @@ err: while (--i >= s) ib_umad_kill_port(&umad_dev->port[i - s]); - kref_put(&umad_dev->ref, ib_umad_release_dev); + kobject_put(&umad_dev->kobj); } static void ib_umad_remove_one(struct ib_device *device) @@ -1172,7 +1185,7 @@ static void ib_umad_remove_one(struct ib_device *device) for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) ib_umad_kill_port(&umad_dev->port[i]); - kref_put(&umad_dev->ref, ib_umad_release_dev); + kobject_put(&umad_dev->kobj); } static char *umad_devnode(struct device *dev, umode_t *mode) diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 0f1607c..3bd6b69 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -843,7 +843,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, uresp.gts_key = ucontext->key; ucontext->key += PAGE_SIZE; spin_unlock(&ucontext->mmap_lock); - ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); + ret = ib_copy_to_udata(udata, &uresp, + sizeof(uresp) - sizeof(uresp.reserved)); if (ret) goto err5; diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h index 32b754c..4893599 100644 --- a/drivers/infiniband/hw/cxgb4/user.h +++ b/drivers/infiniband/hw/cxgb4/user.h @@ -48,6 +48,7 @@ struct c4iw_create_cq_resp { __u32 cqid; __u32 size; __u32 qid_mask; + __u32 reserved; /* explicit padding (optional for i386) */ }; diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index e2f9a51..45802e9 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c @@ -346,6 +346,10 @@ static ssize_t ipath_diagpkt_write(struct file *fp, ret = -EFAULT; goto bail; } + dp.len = odp.len; + dp.unit = odp.unit; + dp.data = odp.data; + dp.pbc_wd = 0; } else { ret = -EINVAL; goto bail; diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index ccb1191..1dd9fcb 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -1028,7 +1028,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) event.event = IB_EVENT_PKEY_CHANGE; event.device = &dd->verbs_dev.ibdev; - event.element.port_num = 1; + event.element.port_num = port; ib_dispatch_event(&event); } return 0; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index e070166..5682e8e 100644 --- 
a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1410,6 +1410,12 @@ err_unmap: err_iu: srp_put_tx_iu(target, iu, SRP_IU_CMD); + /* + * Avoid that the loops that iterate over the request ring can + * encounter a dangling SCSI command pointer. + */ + req->scmnd = NULL; + spin_lock_irqsave(&target->lock, flags); list_add(&req->list, &target->free_reqs); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index c7bd58e..729f75d 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -473,8 +473,15 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse, input_report_key(dev, BTN_TOOL_FINGER, fingers == 1); input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2); input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3); - input_report_key(dev, BTN_LEFT, packet[0] & 0x01); - input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + + /* For clickpads map both buttons to BTN_LEFT */ + if (etd->fw_version & 0x001000) { + input_report_key(dev, BTN_LEFT, packet[0] & 0x03); + } else { + input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + } + input_report_abs(dev, ABS_PRESSURE, pres); input_report_abs(dev, ABS_TOOL_WIDTH, width); @@ -484,9 +491,17 @@ static void elantech_report_absolute_v3(struct psmouse *psmouse, static void elantech_input_sync_v4(struct psmouse *psmouse) { struct input_dev *dev = psmouse->dev; + struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; - input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + /* For clickpads map both buttons to BTN_LEFT */ + if (etd->fw_version & 0x001000) { + input_report_key(dev, BTN_LEFT, packet[0] & 0x03); + } else { + input_report_key(dev, BTN_LEFT, packet[0] & 0x01); + input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); + } + input_mt_report_pointer_emulation(dev, true); input_sync(dev); } @@ -804,7 +819,7 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse) if (etd->set_hw_resolution) etd->reg_10 = 0x0b; else - etd->reg_10 = 0x03; + etd->reg_10 = 0x01; if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) rc = -1; @@ -1267,7 +1282,8 @@ static int elantech_reconnect(struct psmouse *psmouse) } /* - * Some hw_version 3 models go into error state when we try to set bit 3 of r10 + * Some hw_version 3 models go into error state when we try to set + * bit 3 and/or bit 1 of r10. 
*/ static const struct dmi_system_id no_hw_res_dmi_table[] = { #if defined(CONFIG_DMI) && defined(CONFIG_X86) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca954b1..3d183ca 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2479,7 +2479,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) */ if (pt->adjusted_pf.discard_passdown) { data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; - limits->discard_granularity = data_limits->discard_granularity; + limits->discard_granularity = max(data_limits->discard_granularity, + pool->sectors_per_block << SECTOR_SHIFT); } else if (block_size_is_power_of_two(pool)) limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; else diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c index e1863db..7a9b98b 100644 --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c @@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream) /* Instruct the CX2341[56] to start sending packets */ snd_ivtv_lock(itvsc); + + if (ivtv_init_on_first_open(itv)) { + snd_ivtv_unlock(itvsc); + return -ENXIO; + } + s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM]; v4l2_fh_init(&item.fh, s->vdev); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index e1c5bf3e..c081812 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream, * Clocks and timestamps */ +static inline void uvc_video_get_ts(struct timespec *ts) +{ + if (uvc_clock_param == CLOCK_MONOTONIC) + ktime_get_ts(ts); + else + ktime_get_real_ts(ts); +} + static void uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, const __u8 *data, int len) @@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf, stream->clock.last_sof = dev_sof; host_sof = usb_get_current_frame_number(stream->dev->udev); - ktime_get_ts(&ts); + uvc_video_get_ts(&ts); /* The UVC specification allows device implementations that can't obtain * the USB frame number to keep their own frame counters as long as they @@ -1010,10 +1018,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream, return -ENODATA; } - if (uvc_clock_param == CLOCK_MONOTONIC) - ktime_get_ts(&ts); - else - ktime_get_real_ts(&ts); + uvc_video_get_ts(&ts); buf->buf.v4l2_buf.sequence = stream->sequence; buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 31cfe2e..40f80c3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4592,8 +4592,10 @@ void igb_update_stats(struct igb_adapter *adapter, bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); if (rqdpc) { ring->rx_stats.drops += rqdpc; diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index e85d34b..ebcce00 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -810,9 +810,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) break; case B43_PHYTYPE_G: status.band = IEEE80211_BAND_2GHZ; - /* chanid is the radio 
channel cookie value as used - * to tune the radio. */ - status.freq = chanid + 2400; + /* Somewhere between 478.104 and 508.1084 firmware for G-PHY + * has been modified to be compatible with N-PHY and others. + */ + if (dev->fw.rev >= 508) + status.freq = ieee80211_channel_to_frequency(chanid, status.band); + else + status.freq = chanid + 2400; break; case B43_PHYTYPE_N: case B43_PHYTYPE_LP: diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index be0f8b9..68335b5 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -304,6 +304,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; + int iter; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); @@ -312,18 +313,23 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) if (ret >= 0) return 0; - /* If HW is not ready, prepare the conditions to check again */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_PREPARE); + for (iter = 0; iter < 10; iter++) { + /* If HW is not ready, prepare the conditions to check again */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + do { + ret = iwl_pcie_set_hw_ready(trans); + if (ret >= 0) + return 0; - do { - ret = iwl_pcie_set_hw_ready(trans); - if (ret >= 0) - return 0; + usleep_range(200, 1000); + t += 200; + } while (t < 150000); + msleep(25); + } - usleep_range(200, 1000); - t += 200; - } while (t < 150000); + IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter); return ret; } diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 9bea10f..2ab12a4 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1679,8 +1679,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Detect if this device has an hardware controlled radio. */ - if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) + if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) { __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); + /* + * On this device RFKILL initialized during probe does not work. + */ + __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags); + } /* * Check if the BBP tuning should be enabled. diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 0751b35..dfffbfb 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -720,6 +720,7 @@ enum rt2x00_capability_flags { REQUIRE_SW_SEQNO, REQUIRE_HT_TX_DESC, REQUIRE_PS_AUTOWAKE, + REQUIRE_DELAYED_RFKILL, /* * Capabilities diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index a1789f5..0e2cd3c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -1032,9 +1032,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) return; /* - * Unregister extra components. + * Stop rfkill polling. */ - rt2x00rfkill_unregister(rt2x00dev); + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); /* * Allow the HW to uninitialize. @@ -1072,6 +1073,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); + /* + * Start rfkill polling. 
+ */ + if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); + return 0; } @@ -1262,7 +1269,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) rt2x00link_register(rt2x00dev); rt2x00leds_register(rt2x00dev); rt2x00debug_register(rt2x00dev); - rt2x00rfkill_register(rt2x00dev); + + /* + * Start rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_register(rt2x00dev); return 0; @@ -1278,6 +1290,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* + * Stop rfkill polling. + */ + if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags)) + rt2x00rfkill_unregister(rt2x00dev); + + /* * Disable radio. */ rt2x00lib_disable_radio(rt2x00dev); diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 65225fa..7195a30 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -489,6 +489,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, crypto.cipher = rt2x00crypto_key_to_cipher(key); if (crypto.cipher == CIPHER_NONE) return -EOPNOTSUPP; + if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev)) + return -EOPNOTSUPP; crypto.cmd = cmd; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8b51656..902cb3b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2952,6 +2952,7 @@ static void disable_igfx_irq(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * Some devices may pass our check in pci_intx_mask_supported if diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 357da6b..243d8c7 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -41,6 +41,7 @@ #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ static DECLARE_COMPLETION(at91_rtc_updated); +static DECLARE_COMPLETION(at91_rtc_upd_rdy); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; @@ -104,6 +105,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); + wait_for_completion(&at91_rtc_upd_rdy); + /* Stop Time/Calendar from counting */ cr = at91_rtc_read(AT91_RTC_CR); at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); @@ -126,7 +129,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) /* Restart Time/Calendar */ cr = at91_rtc_read(AT91_RTC_CR); + at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV); at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM)); + at91_rtc_write_ier(AT91_RTC_SECEV); return 0; } @@ -233,8 +238,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) if (rtsr) { /* this interrupt is shared! Is it ours? 
*/ if (rtsr & AT91_RTC_ALARM) events |= (RTC_AF | RTC_IRQF); - if (rtsr & AT91_RTC_SECEV) - events |= (RTC_UF | RTC_IRQF); + if (rtsr & AT91_RTC_SECEV) { + complete(&at91_rtc_upd_rdy); + at91_rtc_write_idr(AT91_RTC_SECEV); + } if (rtsr & AT91_RTC_ACKUPD) complete(&at91_rtc_updated); @@ -317,6 +324,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rtc); + /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy + * completion. + */ + at91_rtc_write_ier(AT91_RTC_SECEV); + printk(KERN_INFO "AT91 Real Time Clock driver.\n"); return 0; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4884df9..3cdaed0 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -108,6 +108,25 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -143,6 +162,24 @@ static struct board_type products[] = { {0x1926103C, "Smart Array", &SA5_access}, {0x1928103C, "Smart Array", &SA5_access}, {0x334d103C, "Smart Array P822se", &SA5_access}, + {0x21BD103C, "Smart Array", &SA5_access}, + {0x21BE103C, "Smart Array", &SA5_access}, + {0x21BF103C, "Smart Array", &SA5_access}, + {0x21C0103C, "Smart Array", &SA5_access}, + {0x21C1103C, "Smart Array", &SA5_access}, + {0x21C2103C, "Smart Array", &SA5_access}, + {0x21C3103C, "Smart Array", &SA5_access}, + {0x21C4103C, "Smart Array", &SA5_access}, + {0x21C5103C, "Smart Array", &SA5_access}, + {0x21C6103C, "Smart Array", &SA5_access}, + {0x21C7103C, "Smart Array", &SA5_access}, + {0x21C8103C, "Smart Array", &SA5_access}, + {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array", &SA5_access}, + {0x21CB103C, "Smart Array", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c index 395d8f6..b15b7cb 100644 --- a/drivers/staging/iio/light/tsl2x7x_core.c +++ b/drivers/staging/iio/light/tsl2x7x_core.c @@ -725,9 +725,13 
@@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev) chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] = chip->tsl2x7x_settings.prox_pulse_count; chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] = - chip->tsl2x7x_settings.prox_thres_low; + (chip->tsl2x7x_settings.prox_thres_low) & 0xFF; + chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] = + (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF; chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] = - chip->tsl2x7x_settings.prox_thres_high; + (chip->tsl2x7x_settings.prox_thres_high) & 0xFF; + chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] = + (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF; /* and make sure we're not already on */ if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) { diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index a6326f1..ba56bfe 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -460,6 +460,7 @@ int iscsit_del_np(struct iscsi_np *np) */ send_sig(SIGINT, np->np_thread, 1); kthread_stop(np->np_thread); + np->np_thread = NULL; } iscsit_del_np_comm(np); diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index b54f6ec..cbb04f3 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -342,6 +342,16 @@ static int chap_server_compute_md5( goto out; } /* + * During mutual authentication, the CHAP_C generated by the + * initiator must not match the original CHAP_C generated by + * the target. + */ + if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) { + pr_err("initiator CHAP_C matches target CHAP_C, failing" + " login attempt\n"); + goto out; + } + /* * Generate CHAP_N and CHAP_R for mutual authentication. */ tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index fdb632f..1f400ae 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -867,7 +867,7 @@ fail: static int __iscsi_target_login_thread(struct iscsi_np *np) { u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0; - int err, ret = 0, stop; + int err, ret = 0; struct iscsi_conn *conn = NULL; struct iscsi_login *login; struct iscsi_portal_group *tpg = NULL; @@ -884,6 +884,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; complete(&np->np_restart_comp); + } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { + spin_unlock_bh(&np->np_thread_lock); + goto exit; } else { np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; } @@ -1166,16 +1169,9 @@ old_sess_out: } out: - stop = kthread_should_stop(); - if (!stop && signal_pending(current)) { - spin_lock_bh(&np->np_thread_lock); - stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN); - spin_unlock_bh(&np->np_thread_lock); - } - /* Wait for another socket.. 
*/ - if (!stop) - return 1; + return 1; +exit: iscsi_stop_login_thread_timer(np); spin_lock_bh(&np->np_thread_lock); np->np_thread_state = ISCSI_NP_THREAD_EXIT; @@ -1190,7 +1186,7 @@ int iscsi_target_login_thread(void *arg) allow_signal(SIGINT); - while (!kthread_should_stop()) { + while (1) { ret = __iscsi_target_login_thread(np); /* * We break and exit here unless another sock_accept() call diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index c461180..d0d943f 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -628,6 +628,7 @@ void core_dev_unexport( dev->export_count--; spin_unlock(&hba->device_lock); + lun->lun_sep = NULL; lun->lun_se_dev = NULL; } diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 96f1359..9981b9b 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -655,6 +655,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma) return -EINVAL; mem = idev->info->mem + mi; + if (mem->addr & ~PAGE_MASK) + return -ENODEV; if (vma->vm_end - vma->vm_start > mem->size) return -EINVAL; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 418288e..288ec5e 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -603,6 +603,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dwc3_remove_requests(dwc, dep); + /* make sure HW endpoint isn't stalled */ + if (dep->flags & DWC3_EP_STALL) + __dwc3_gadget_ep_set_halt(dep, 0); + reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); reg &= ~DWC3_DALEPENA_EP(dep->number); dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 4c338ec..9cfe3af 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -555,6 +555,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = { DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), }, }, + { + /* HASEE E200 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"), + DMI_MATCH(DMI_BOARD_NAME, "E210"), + DMI_MATCH(DMI_BIOS_VERSION, "6.00"), + }, + }, { } }; @@ -564,9 +572,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev, { int try_handoff = 1, tried_handoff = 0; - /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying - * the handoff on its unused controller. Skip it. */ - if (pdev->vendor == 0x8086 && pdev->device == 0x283a) { + /* + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying + * the handoff on its unused controller. Skip it. + * + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021). + */ + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a || + pdev->device == 0x27cc)) { if (dmi_check_system(ehci_dmi_nohandoff_table)) try_handoff = 0; } diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 268148d..66cf0c5 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1138,6 +1138,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async) urb->context = &completion; urb->complete = unlink1_callback; + if (usb_pipeout(urb->pipe)) { + simple_fill_buf(urb); + urb->transfer_flags |= URB_ZERO_PACKET; + } + /* keep the endpoint busy. there are lots of hc/hcd-internal * states, and testing should get to all of them over time. 
* @@ -1268,6 +1273,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num, unlink_queued_callback, &ctx); ctx.urbs[i]->transfer_dma = buf_dma; ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; + + if (usb_pipeout(ctx.urbs[i]->pipe)) { + simple_fill_buf(ctx.urbs[i]); + ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET; + } } /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */ diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 6c0a542..43d93db 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell 
Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index 38a999e..bb242bd 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -20,6 +20,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitops.h> +#include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> @@ -77,6 +78,15 @@ static inline void ath79_wdt_keepalive(void) static inline void ath79_wdt_enable(void) { ath79_wdt_keepalive(); + + /* + * Updating the TIMER register requires a few microseconds + * on the AR934x SoCs at least. Use a small delay to ensure + * that the TIMER register is updated within the hardware + * before enabling the watchdog. + */ + udelay(2); + ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR); /* flush write */ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 8872642..e421182 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -60,7 +60,6 @@ * @adev: amba device structure of wdt * @status: current status of wdt * @load_val: load value to be set for current timeout - * @timeout: current programmed timeout */ struct sp805_wdt { struct watchdog_device wdd; @@ -69,7 +68,6 @@ struct sp805_wdt { struct clk *clk; struct amba_device *adev; unsigned int load_val; - unsigned int timeout; }; static bool nowayout = WATCHDOG_NOWAYOUT; @@ -99,7 +97,7 @@ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout) spin_lock(&wdt->lock); wdt->load_val = load; /* roundup timeout to closest positive integer value */ - wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate); + wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate); spin_unlock(&wdt->lock); return 0; diff --git a/fs/attr.c b/fs/attr.c index 1449adb..6a54f13 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -50,14 +50,14 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && - !inode_capable(inode, CAP_CHOWN)) + !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && - !inode_capable(inode, CAP_CHOWN)) + !capable_wrt_inode_uidgid(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ @@ -67,7 +67,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr) /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? 
attr->ia_gid : inode->i_gid) && - !inode_capable(inode, CAP_FSETID)) + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } @@ -160,7 +160,7 @@ void setattr_copy(struct inode *inode, const struct iattr *attr) umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && - !inode_capable(inode, CAP_FSETID)) + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) mode &= ~S_ISGID; inode->i_mode = mode; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 125397e..0a800dc 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1595,6 +1595,7 @@ again: * shortening the size of the delalloc range we're searching */ free_extent_state(cached_state); + cached_state = NULL; if (!loops) { unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1); max_bytes = PAGE_CACHE_SIZE - offset; diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 71d5d0a..839a5321 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -290,7 +290,8 @@ int cifsConvertToUTF16(__le16 *target, const char *source, int srclen, const struct nls_table *cp, int mapChars) { - int i, j, charlen; + int i, charlen; + int j = 0; char src_char; __le16 dst_char; wchar_t tmp; @@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, if (!mapChars) return cifs_strtoUTF16(target, source, PATH_MAX, cp); - for (i = 0, j = 0; i < srclen; j++) { + for (i = 0; i < srclen; j++) { src_char = source[i]; charlen = 1; switch (src_char) { case 0: - put_unaligned(0, &target[j]); goto ctoUTF16_out; case ':': dst_char = cpu_to_le16(UNI_COLON); @@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, } ctoUTF16_out: + put_unaligned(0, &target[j]); /* Null terminate target unicode string */ return j; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 2167e90..a72f1c0 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3030,7 +3030,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac, } BUG_ON(start + size <= ac->ac_o_ex.fe_logical && start > ac->ac_o_ex.fe_logical); - BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); + BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); /* now prepare goal request */ diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 311d1db..a30ef89 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -446,24 +446,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io, set_page_writeback(page); ClearPageError(page); + /* + * Comments copied from block_write_full_page_endio: + * + * The page straddles i_size. It must be zeroed out on each and every + * writepage invocation because it may be mmapped. "A file is mapped + * in multiples of the page size. For a file that is not a multiple of + * the page size, the remaining memory is zeroed when mapped, and + * writes to that region are not written out to the file." + */ + if (len < PAGE_CACHE_SIZE) + zero_user_segment(page, len, PAGE_CACHE_SIZE); + for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; if (block_start >= len) { - /* - * Comments copied from block_write_full_page_endio: - * - * The page straddles i_size. It must be zeroed out on - * each and every writepage invocation because it may - * be mmapped. "A file is mapped in multiples of the - * page size. 
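[Editorial aside, a worked example for the ext4_bio_write_page() hunk above, assuming 4 KiB pages (PAGE_CACHE_SIZE == 4096): if i_size ends 100 bytes into the page being written, the function is entered with len == 100 and the tail of the page is now cleared once, up front, instead of block by block inside the buffer walk.]

	/* len == 100 in this example; bytes 100..4095 are zeroed exactly once
	 * before the per-buffer loop runs
	 */
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);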
For a file that is not a multiple of - * the page size, the remaining memory is zeroed when - * mapped, and writes to that region are not written - * out to the file." - */ - zero_user_segment(page, block_start, block_end); clear_buffer_dirty(bh); set_buffer_uptodate(bh); continue; diff --git a/fs/inode.c b/fs/inode.c index b98540e..2e4031e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1842,14 +1842,18 @@ EXPORT_SYMBOL(inode_init_owner); * inode_owner_or_capable - check current task permissions to inode * @inode: inode being checked * - * Return true if current either has CAP_FOWNER to the inode, or - * owns the file. + * Return true if current either has CAP_FOWNER in a namespace with the + * inode owner uid mapped, or owns the file. */ bool inode_owner_or_capable(const struct inode *inode) { + struct user_namespace *ns; + if (uid_eq(current_fsuid(), inode->i_uid)) return true; - if (inode_capable(inode, CAP_FOWNER)) + + ns = current_user_ns(); + if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid)) return true; return false; } diff --git a/fs/namei.c b/fs/namei.c index 77dc200..8bbff22 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -321,10 +321,11 @@ int generic_permission(struct inode *inode, int mask) if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ - if (inode_capable(inode, CAP_DAC_OVERRIDE)) + if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; if (!(mask & MAY_WRITE)) - if (inode_capable(inode, CAP_DAC_READ_SEARCH)) + if (capable_wrt_inode_uidgid(inode, + CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } @@ -334,7 +335,7 @@ int generic_permission(struct inode *inode, int mask) * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) - if (inode_capable(inode, CAP_DAC_OVERRIDE)) + if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; /* @@ -342,7 +343,7 @@ int generic_permission(struct inode *inode, int mask) */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) - if (inode_capable(inode, CAP_DAC_READ_SEARCH)) + if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; return -EACCES; @@ -2208,7 +2209,7 @@ static inline int check_sticky(struct inode *dir, struct inode *inode) return 0; if (uid_eq(dir->i_uid, fsuid)) return 0; - return !inode_capable(inode, CAP_FOWNER); + return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); } /* diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index ebeb94c..a5130f1 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1381,18 +1381,20 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_version = fattr->change_attr; } } else if (server->caps & NFS_CAP_CHANGE_ATTR) - invalid |= save_cache_validity; + nfsi->cache_validity |= save_cache_validity; if (fattr->valid & NFS_ATTR_FATTR_MTIME) { memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); } else if (server->caps & NFS_CAP_MTIME) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_REVAL_FORCED); if (fattr->valid & NFS_ATTR_FATTR_CTIME) { memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); } else if (server->caps & NFS_CAP_CTIME) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_REVAL_FORCED); /* Check if our cached file size is stale */ @@ -1415,7 +1417,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) (long long)new_isize); } } else - 
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED); @@ -1423,7 +1426,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (fattr->valid & NFS_ATTR_FATTR_ATIME) memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime)); else if (server->caps & NFS_CAP_ATIME) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATIME | NFS_INO_REVAL_FORCED); if (fattr->valid & NFS_ATTR_FATTR_MODE) { @@ -1434,7 +1438,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; } } else if (server->caps & NFS_CAP_MODE) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | NFS_INO_REVAL_FORCED); @@ -1445,7 +1450,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_uid = fattr->uid; } } else if (server->caps & NFS_CAP_OWNER) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | NFS_INO_REVAL_FORCED); @@ -1456,7 +1462,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode->i_gid = fattr->gid; } } else if (server->caps & NFS_CAP_OWNER_GROUP) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | NFS_INO_REVAL_FORCED); @@ -1469,7 +1476,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) set_nlink(inode, fattr->nlink); } } else if (server->caps & NFS_CAP_NLINK) - invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR + nfsi->cache_validity |= save_cache_validity & + (NFS_INO_INVALID_ATTR | NFS_INO_REVAL_FORCED); if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { diff --git a/fs/nfs/super.c b/fs/nfs/super.c index b056b16..f4d000a 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2167,6 +2167,7 @@ nfs_remount(struct super_block *sb, int *flags, char *raw_data) data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ; data->nfs_server.port = nfss->port; data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen; + data->net = current->nsproxy->net_ns; memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr, data->nfs_server.addrlen); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d5e1349..5986138 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3520,7 +3520,7 @@ nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp) * correspondance, and we have to delete the lockowner when we * delete the lock stateid: */ - unhash_lockowner(lo); + release_lockowner(lo); return nfs_ok; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index dc12d02..562fab9 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2048,8 +2048,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, err = vfs_getattr(exp->ex_path.mnt, dentry, &stat); if (err) goto out_nfserr; - if ((bmval0 & (FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | - FATTR4_WORD0_MAXNAME)) || + if ((bmval0 & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE | + FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) || (bmval1 & (FATTR4_WORD1_SPACE_AVAIL | 
FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL))) { err = vfs_statfs(&path, &statfs); diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index ca71b1f..0c1e96e 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3210,8 +3210,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) attr->ia_size != i_size_read(inode)) { error = inode_newsize_ok(inode, attr->ia_size); if (!error) { + /* + * Could race against reiserfs_file_release + * if called from NFS, so take tailpack mutex. + */ + mutex_lock(&REISERFS_I(inode)->tailpack); truncate_setsize(inode, attr->ia_size); - reiserfs_vfs_truncate_file(inode); + reiserfs_truncate_file(inode, 1); + mutex_unlock(&REISERFS_I(inode)->tailpack); } } diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5bc7781..bc7798d 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1522,8 +1522,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma, ubifs_release_dirty_inode_budget(c, ui); } - unlock_page(page); - return 0; + return VM_FAULT_LOCKED; out_unlock: unlock_page(page); diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index 9e1d056..e0a7a76 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c @@ -128,7 +128,6 @@ static int shrink_tnc(struct ubifs_info *c, int nr, int age, int *contention) freed = ubifs_destroy_tnc_subtree(znode); atomic_long_sub(freed, &ubifs_clean_zn_cnt); atomic_long_sub(freed, &c->clean_zn_cnt); - ubifs_assert(atomic_long_read(&c->clean_zn_cnt) >= 0); total_freed += freed; znode = zprev; } diff --git a/include/linux/capability.h b/include/linux/capability.h index d9a4f7f..15f9092 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -211,7 +211,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, extern bool capable(int cap); extern bool ns_capable(struct user_namespace *ns, int cap); extern bool nsown_capable(int cap); -extern bool inode_capable(const struct inode *inode, int cap); +extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); /* audit system wants to get cap info from files as well */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 623325e..078bc2f 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -27,6 +27,8 @@ struct irq_desc; * @irq_count: stats field to detect stalled irqs * @last_unhandled: aging timer for unhandled count * @irqs_unhandled: stats field for spurious unhandled interrupts + * @threads_handled: stats field for deferred spurious detection of threaded handlers + * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers * @lock: locking for SMP * @affinity_hint: hint to user space for preferred irq affinity * @affinity_notify: context for notification of affinity changes @@ -52,6 +54,8 @@ struct irq_desc { unsigned int irq_count; /* For detecting broken IRQs */ unsigned long last_unhandled; /* Aging timer for unhandled count */ unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; raw_spinlock_t lock; struct cpumask *percpu_enabled; #ifdef CONFIG_SMP diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 73b64a3..d1584a4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -73,9 +73,13 @@ enum { extern int page_group_by_mobility_disabled; +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) + static inline int 
get_pageblock_migratetype(struct page *page) { - return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2); + return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK); } struct free_area { diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index be655e4..bed78c9 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -30,9 +30,12 @@ enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + 3 - 1, /* 3 bits required for migrate types */ -#ifdef CONFIG_COMPACTION PB_migrate_skip,/* If set the block is skipped by compaction */ -#endif /* CONFIG_COMPACTION */ + + /* + * Assume the bits will always align on a word. If this assumption + * changes then get/set pageblock needs updating. + */ NR_PAGEBLOCK_BITS }; @@ -62,11 +65,33 @@ extern int pageblock_order; /* Forward declaration */ struct page; +unsigned long get_pageblock_flags_mask(struct page *page, + unsigned long end_bitidx, + unsigned long mask); +void set_pageblock_flags_mask(struct page *page, + unsigned long flags, + unsigned long end_bitidx, + unsigned long mask); + /* Declarations for getting and setting flags. See mm/page_alloc.c */ -unsigned long get_pageblock_flags_group(struct page *page, - int start_bitidx, int end_bitidx); -void set_pageblock_flags_group(struct page *page, unsigned long flags, - int start_bitidx, int end_bitidx); +static inline unsigned long get_pageblock_flags_group(struct page *page, + int start_bitidx, int end_bitidx) +{ + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; + unsigned long mask = (1 << nr_flag_bits) - 1; + + return get_pageblock_flags_mask(page, end_bitidx, mask); +} + +static inline void set_pageblock_flags_group(struct page *page, + unsigned long flags, + int start_bitidx, int end_bitidx) +{ + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1; + unsigned long mask = (1 << nr_flag_bits) - 1; + + set_pageblock_flags_mask(page, flags, end_bitidx, mask); +} #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0eb6579..ed403e9 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -754,6 +754,7 @@ #define PCI_DEVICE_ID_HP_CISSE 0x323a #define PCI_DEVICE_ID_HP_CISSF 0x323b #define PCI_DEVICE_ID_HP_CISSH 0x323c +#define PCI_DEVICE_ID_HP_CISSI 0x3239 #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 #define PCI_VENDOR_ID_PCTECH 0x1042 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 89573a3..2e99b8e 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -5,6 +5,7 @@ #include <linux/sched.h> /* For struct task_struct. */ #include <linux/err.h> /* for IS_ERR_VALUE */ #include <linux/bug.h> /* For BUG_ON. */ +#include <linux/pid_namespace.h> /* For task_active_pid_ns. */ #include <uapi/linux/ptrace.h> /* @@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message) } /** + * ptrace_event_pid - possibly stop for a ptrace event notification + * @event: %PTRACE_EVENT_* value to report + * @pid: process identifier for %PTRACE_GETEVENTMSG to return + * + * Check whether @event is enabled and, if so, report @event and @pid + * to the ptrace parent. @pid is reported as the pid_t seen from the + * the ptrace parent's pid namespace. + * + * Called without locks. 
+ */ +static inline void ptrace_event_pid(int event, struct pid *pid) +{ + /* + * FIXME: There's a potential race if a ptracer in a different pid + * namespace than parent attaches between computing message below and + * when we acquire tasklist_lock in ptrace_stop(). If this happens, + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG. + */ + unsigned long message = 0; + struct pid_namespace *ns; + + rcu_read_lock(); + ns = task_active_pid_ns(rcu_dereference(current->parent)); + if (ns) + message = pid_nr_ns(pid, ns); + rcu_read_unlock(); + + ptrace_event(event, message); +} + +/** * ptrace_init_task - initialize ptrace state for a new child * @child: new child task * @ptrace: true if child should be ptrace'd by parent's tracer diff --git a/include/sound/core.h b/include/sound/core.h index 93896ad..8d29d28 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -120,6 +120,8 @@ struct snd_card { int user_ctl_count; /* count of all user controls */ struct list_head controls; /* all controls for this card */ struct list_head ctl_files; /* active control files */ + struct mutex user_ctl_lock; /* protects user controls against + concurrent access */ struct snd_info_entry *proc_root; /* root for soundcard specific files */ struct snd_info_entry *proc_id; /* the card id */ diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 9aa8624..9c94dd8 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -80,7 +80,7 @@ struct snd_compr_tstamp { struct snd_compr_avail { __u64 avail; struct snd_compr_tstamp tstamp; -}; +} __attribute__((packed)); enum snd_compr_direction { SND_COMPRESS_PLAYBACK = 0, diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ba198b1..999a48d 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -859,6 +859,22 @@ static enum audit_state audit_filter_task(struct task_struct *tsk, char **key) return AUDIT_BUILD_CONTEXT; } +static int audit_in_mask(const struct audit_krule *rule, unsigned long val) +{ + int word, bit; + + if (val > 0xffffffff) + return false; + + word = AUDIT_WORD(val); + if (word >= AUDIT_BITMASK_SIZE) + return false; + + bit = AUDIT_BIT(val); + + return rule->mask[word] & bit; +} + /* At syscall entry and exit time, this filter is called if the * audit_state is not low enough that auditing cannot take place, but is * also not high enough that we already know we have to write an audit @@ -876,11 +892,8 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, rcu_read_lock(); if (!list_empty(list)) { - int word = AUDIT_WORD(ctx->major); - int bit = AUDIT_BIT(ctx->major); - list_for_each_entry_rcu(e, list, list) { - if ((e->rule.mask[word] & bit) == bit && + if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, NULL, &state, false)) { rcu_read_unlock(); @@ -900,20 +913,16 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, static int audit_filter_inode_name(struct task_struct *tsk, struct audit_names *n, struct audit_context *ctx) { - int word, bit; int h = audit_hash_ino((u32)n->ino); struct list_head *list = &audit_inode_hash[h]; struct audit_entry *e; enum audit_state state; - word = AUDIT_WORD(ctx->major); - bit = AUDIT_BIT(ctx->major); - if (list_empty(list)) return 0; list_for_each_entry_rcu(e, list, list) { - if ((e->rule.mask[word] & bit) == bit && + if (audit_in_mask(&e->rule, ctx->major) && audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) { ctx->current_state = state; return 1; 
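[Editorial aside on the audit_in_mask() helper introduced above: the syscall filter is stored as an array of 32-bit words, and the helper now rejects out-of-range values before indexing it. A worked sketch using the AUDIT_WORD()/AUDIT_BIT() macros from include/uapi/linux/audit.h, with x86_64's __NR_execve == 59 as the example value; syscall_matches_rule() is a hypothetical name for illustration.]

/* Illustration only: how a syscall number indexes the rule mask.
 * AUDIT_WORD(nr) == nr / 32, AUDIT_BIT(nr) == 1 << (nr % 32).
 */
static bool syscall_matches_rule(const struct audit_krule *rule,
				 unsigned long nr)
{
	if (nr > 0xffffffff)			/* bogus major numbers */
		return false;
	if (AUDIT_WORD(nr) >= AUDIT_BITMASK_SIZE)
		return false;			/* beyond the 64-word mask */
	/* __NR_execve == 59: word 59/32 == 1, bit 1 << (59 % 32) == 1 << 27 */
	return rule->mask[AUDIT_WORD(nr)] & AUDIT_BIT(nr);
}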
diff --git a/kernel/capability.c b/kernel/capability.c index f6c2ce5..d52eecc0 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -445,22 +445,18 @@ bool nsown_capable(int cap) } /** - * inode_capable - Check superior capability over inode + * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped * @inode: The inode in question * @cap: The capability in question * - * Return true if the current task has the given superior capability - * targeted at it's own user namespace and that the given inode is owned - * by the current user namespace or a child namespace. - * - * Currently we check to see if an inode is owned by the current - * user namespace by seeing if the inode's owner maps into the - * current user namespace. - * + * Return true if the current task has the given capability targeted at + * its own user namespace and that the given inode's uid and gid are + * mapped into the current user namespace. */ -bool inode_capable(const struct inode *inode, int cap) +bool capable_wrt_inode_uidgid(const struct inode *inode, int cap) { struct user_namespace *ns = current_user_ns(); - return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid); + return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) && + kgid_has_mapping(ns, inode->i_gid); } diff --git a/kernel/fork.c b/kernel/fork.c index 65b8b0a..e788b14 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1596,10 +1596,12 @@ long do_fork(unsigned long clone_flags, */ if (!IS_ERR(p)) { struct completion vfork; + struct pid *pid; trace_sched_process_fork(current, p); - nr = task_pid_vnr(p); + pid = get_task_pid(p, PIDTYPE_PID); + nr = pid_vnr(pid); if (clone_flags & CLONE_PARENT_SETTID) put_user(nr, parent_tidptr); @@ -1614,12 +1616,14 @@ long do_fork(unsigned long clone_flags, /* forking complete and child started to run, tell ptracer */ if (unlikely(trace)) - ptrace_event(trace, nr); + ptrace_event_pid(trace, pid); if (clone_flags & CLONE_VFORK) { if (!wait_for_vfork_done(p, &vfork)) - ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); + ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid); } + + put_pid(pid); } else { nr = PTR_ERR(p); } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index edbe80e..f26e367 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -860,8 +860,8 @@ static int irq_thread(void *data) irq_thread_check_affinity(desc, action); action_ret = handler_fn(desc, action); - if (!noirqdebug) - note_interrupt(action->irq, desc, action_ret); + if (action_ret == IRQ_HANDLED) + atomic_inc(&desc->threads_handled); wake_threads_waitq(desc); } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 7b5f012..febcee3c 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc, return action && (action->flags & IRQF_IRQPOLL); } +#define SPURIOUS_DEFERRED 0x80000000 + void note_interrupt(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) { if (desc->istate & IRQS_POLL_INPROGRESS) return; - /* we get here again via the threaded handler */ - if (action_ret == IRQ_WAKE_THREAD) - return; - if (bad_action_ret(action_ret)) { report_bad_irq(irq, desc, action_ret); return; } + /* + * We cannot call note_interrupt from the threaded handler + * because we need to look at the compound of all handlers + * (primary and threaded). Aside of that in the threaded + * shared case we have no serialization against an incoming + * hardware interrupt while we are dealing with a threaded + * result. 
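[Editorial aside tying the two irq hunks together, the kernel/irq/manage.c change earlier in this patch and the note_interrupt() rework above: the threaded handler no longer calls note_interrupt() itself, it only bumps a per-descriptor counter, and the next hard interrupt compares that counter against its last snapshot. A condensed sketch, not the exact kernel code:]

	/* Producer: the irq thread, after running the threaded handler */
	if (action_ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	/* Consumer: note_interrupt() on the following hard interrupt */
	handled = atomic_read(&desc->threads_handled) | SPURIOUS_DEFERRED;
	if (handled != desc->threads_handled_last) {
		action_ret = IRQ_HANDLED;	/* some thread made progress */
		desc->threads_handled_last = handled;
	} else {
		action_ret = IRQ_NONE;		/* feed the spurious detector */
	}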
+ * + * So in case a thread is woken, we just note the fact and + * defer the analysis to the next hardware interrupt. + * + * The threaded handlers store whether they sucessfully + * handled an interrupt and we check whether that number + * changed versus the last invocation. + * + * We could handle all interrupts with the delayed by one + * mechanism, but for the non forced threaded case we'd just + * add pointless overhead to the straight hardirq interrupts + * for the sake of a few lines less code. + */ + if (action_ret & IRQ_WAKE_THREAD) { + /* + * There is a thread woken. Check whether one of the + * shared primary handlers returned IRQ_HANDLED. If + * not we defer the spurious detection to the next + * interrupt. + */ + if (action_ret == IRQ_WAKE_THREAD) { + int handled; + /* + * We use bit 31 of thread_handled_last to + * denote the deferred spurious detection + * active. No locking necessary as + * thread_handled_last is only accessed here + * and we have the guarantee that hard + * interrupts are not reentrant. + */ + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { + desc->threads_handled_last |= SPURIOUS_DEFERRED; + return; + } + /* + * Check whether one of the threaded handlers + * returned IRQ_HANDLED since the last + * interrupt happened. + * + * For simplicity we just set bit 31, as it is + * set in threads_handled_last as well. So we + * avoid extra masking. And we really do not + * care about the high bits of the handled + * count. We just care about the count being + * different than the one we saw before. + */ + handled = atomic_read(&desc->threads_handled); + handled |= SPURIOUS_DEFERRED; + if (handled != desc->threads_handled_last) { + action_ret = IRQ_HANDLED; + /* + * Note: We keep the SPURIOUS_DEFERRED + * bit set. We are handling the + * previous invocation right now. + * Keep it for the current one, so the + * next hardware interrupt will + * account for it. + */ + desc->threads_handled_last = handled; + } else { + /* + * None of the threaded handlers felt + * responsible for the last interrupt + * + * We keep the SPURIOUS_DEFERRED bit + * set in threads_handled_last as we + * need to account for the current + * interrupt as well. + */ + action_ret = IRQ_NONE; + } + } else { + /* + * One of the primary handlers returned + * IRQ_HANDLED. So we don't care about the + * threaded handlers on the same line. Clear + * the deferred detection bit. + * + * In theory we could/should check whether the + * deferred bit is set and take the result of + * the previous run into account here as + * well. But it's really not worth the + * trouble. If every other interrupt is + * handled we never trigger the spurious + * detector. And if this is just the one out + * of 100k unhandled ones which is handled + * then we merily delay the spurious detection + * by one hard interrupt. Not a real problem. 
+ */ + desc->threads_handled_last &= ~SPURIOUS_DEFERRED; + } + } + if (unlikely(action_ret == IRQ_NONE)) { /* * If we are seeing only the odd spurious IRQ caused by diff --git a/lib/idr.c b/lib/idr.c index 60f7619..fb49014 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -167,7 +167,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; /* if already at the top layer, we need to grow */ - if (id >= 1 << (idp->layers * IDR_BITS)) { + if (id > idr_max(idp->layers)) { *starting_id = id; return IDR_NEED_TO_GROW; } @@ -672,14 +672,12 @@ void *idr_replace(struct idr *idp, void *ptr, int id) if (!p) return ERR_PTR(-EINVAL); - n = (p->layer+1) * IDR_BITS; - id &= MAX_IDR_MASK; - if (id >= (1 << n)) + if (id > idr_max(p->layer + 1)) return ERR_PTR(-EINVAL); - n -= IDR_BITS; + n = p->layer * IDR_BITS; while ((n > 0) && p) { p = p->ary[(id >> n) & IDR_MASK]; n -= IDR_BITS; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 35fc5eb..8f11a87 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2291,6 +2291,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, update_mmu_cache(vma, address, ptep); } +static int is_hugetlb_entry_migration(pte_t pte) +{ + swp_entry_t swp; + + if (huge_pte_none(pte) || pte_present(pte)) + return 0; + swp = pte_to_swp_entry(pte); + if (non_swap_entry(swp) && is_migration_entry(swp)) + return 1; + else + return 0; +} + +static int is_hugetlb_entry_hwpoisoned(pte_t pte) +{ + swp_entry_t swp; + + if (huge_pte_none(pte) || pte_present(pte)) + return 0; + swp = pte_to_swp_entry(pte); + if (non_swap_entry(swp) && is_hwpoison_entry(swp)) + return 1; + else + return 0; +} int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) @@ -2318,7 +2343,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, spin_lock(&dst->page_table_lock); spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING); - if (!huge_pte_none(huge_ptep_get(src_pte))) { + entry = huge_ptep_get(src_pte); + if (huge_pte_none(entry)) { /* skip none entry */ + ; + } else if (unlikely(is_hugetlb_entry_migration(entry) || + is_hugetlb_entry_hwpoisoned(entry))) { + swp_entry_t swp_entry = pte_to_swp_entry(entry); + + if (is_write_migration_entry(swp_entry) && cow) { + /* + * COW mappings require pages in both + * parent and child to be set to read. 
+ */ + make_migration_entry_read(&swp_entry); + entry = swp_entry_to_pte(swp_entry); + set_huge_pte_at(src, addr, src_pte, entry); + } + set_huge_pte_at(dst, addr, dst_pte, entry); + } else { if (cow) huge_ptep_set_wrprotect(src, addr, src_pte); entry = huge_ptep_get(src_pte); @@ -2336,32 +2378,6 @@ nomem: return -ENOMEM; } -static int is_hugetlb_entry_migration(pte_t pte) -{ - swp_entry_t swp; - - if (huge_pte_none(pte) || pte_present(pte)) - return 0; - swp = pte_to_swp_entry(pte); - if (non_swap_entry(swp) && is_migration_entry(swp)) - return 1; - else - return 0; -} - -static int is_hugetlb_entry_hwpoisoned(pte_t pte) -{ - swp_entry_t swp; - - if (huge_pte_none(pte) || pte_present(pte)) - return 0; - swp = pte_to_swp_entry(pte); - if (non_swap_entry(swp) && is_hwpoison_entry(swp)) - return 1; - else - return 0; -} - void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3e6bb9f..c29c7d5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2502,8 +2502,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm, * in system level. So, allow to go ahead dying process in addition to * MEMDIE process. */ - if (unlikely(test_thread_flag(TIF_MEMDIE) - || fatal_signal_pending(current))) + if (unlikely(test_thread_flag(TIF_MEMDIE) || + fatal_signal_pending(current) || + current->flags & PF_EXITING)) goto bypass; /* diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d3ed748..7f3ceb1 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, #endif si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT; - if ((flags & MF_ACTION_REQUIRED) && t == current) { + if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) { si.si_code = BUS_MCEERR_AR; - ret = force_sig_info(SIGBUS, &si, t); + ret = force_sig_info(SIGBUS, &si, current); } else { /* * Don't use force here, it's convenient if the signal @@ -382,10 +382,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, } } -static int task_early_kill(struct task_struct *tsk) +static int task_early_kill(struct task_struct *tsk, int force_early) { if (!tsk->mm) return 0; + if (force_early) + return 1; if (tsk->flags & PF_MCE_PROCESS) return !!(tsk->flags & PF_MCE_EARLY); return sysctl_memory_failure_early_kill; @@ -395,7 +397,7 @@ static int task_early_kill(struct task_struct *tsk) * Collect processes when the error hit an anonymous page. */ static void collect_procs_anon(struct page *page, struct list_head *to_kill, - struct to_kill **tkc) + struct to_kill **tkc, int force_early) { struct vm_area_struct *vma; struct task_struct *tsk; @@ -411,7 +413,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, for_each_process (tsk) { struct anon_vma_chain *vmac; - if (!task_early_kill(tsk)) + if (!task_early_kill(tsk, force_early)) continue; anon_vma_interval_tree_foreach(vmac, &av->rb_root, pgoff, pgoff) { @@ -430,7 +432,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill, * Collect processes when the error hit a file mapped page. 
*/ static void collect_procs_file(struct page *page, struct list_head *to_kill, - struct to_kill **tkc) + struct to_kill **tkc, int force_early) { struct vm_area_struct *vma; struct task_struct *tsk; @@ -441,7 +443,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, for_each_process(tsk) { pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - if (!task_early_kill(tsk)) + if (!task_early_kill(tsk, force_early)) continue; vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, @@ -467,7 +469,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill, * First preallocate one tokill structure outside the spin locks, * so that we can kill at least one process reasonably reliable. */ -static void collect_procs(struct page *page, struct list_head *tokill) +static void collect_procs(struct page *page, struct list_head *tokill, + int force_early) { struct to_kill *tk; @@ -478,9 +481,9 @@ static void collect_procs(struct page *page, struct list_head *tokill) if (!tk) return; if (PageAnon(page)) - collect_procs_anon(page, tokill, &tk); + collect_procs_anon(page, tokill, &tk, force_early); else - collect_procs_file(page, tokill, &tk); + collect_procs_file(page, tokill, &tk, force_early); kfree(tk); } @@ -950,7 +953,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, * there's nothing that can be done. */ if (kill) - collect_procs(ppage, &tokill); + collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED); if (hpage != ppage) lock_page(ppage); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index eb2848e..58198da 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -621,19 +621,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma, * If pagelist != NULL then isolate pages from the LRU and * put them on the pagelist. */ -static struct vm_area_struct * +static int check_range(struct mm_struct *mm, unsigned long start, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) { - int err; - struct vm_area_struct *first, *vma, *prev; - + int err = 0; + struct vm_area_struct *vma, *prev; - first = find_vma(mm, start); - if (!first) - return ERR_PTR(-EFAULT); + vma = find_vma(mm, start); + if (!vma) + return -EFAULT; prev = NULL; - for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { + for (; vma && vma->vm_start < end; vma = vma->vm_next) { unsigned long endvma = vma->vm_end; if (endvma > end) @@ -643,9 +642,9 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, if (!(flags & MPOL_MF_DISCONTIG_OK)) { if (!vma->vm_next && vma->vm_end < end) - return ERR_PTR(-EFAULT); + return -EFAULT; if (prev && prev->vm_end < vma->vm_start) - return ERR_PTR(-EFAULT); + return -EFAULT; } if (is_vm_hugetlb_page(vma)) @@ -662,15 +661,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, err = check_pgd_range(vma, start, endvma, nodes, flags, private); - if (err) { - first = ERR_PTR(err); + if (err) break; - } } next: prev = vma; } - return first; + return err; } /* @@ -1152,16 +1149,17 @@ out: /* * Allocate a new page for page migration based on vma policy. - * Start assuming that page is mapped by vma pointed to by @private. + * Start by assuming the page is mapped by the same vma as contains @start. * Search forward from there, if not. N.B., this assumes that the * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. 
*/ -static struct page *new_vma_page(struct page *page, unsigned long private, int **x) +static struct page *new_page(struct page *page, unsigned long start, int **x) { - struct vm_area_struct *vma = (struct vm_area_struct *)private; + struct vm_area_struct *vma; unsigned long uninitialized_var(address); + vma = find_vma(current->mm, start); while (vma) { address = page_address_in_vma(page, vma); if (address != -EFAULT) @@ -1187,7 +1185,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, return -ENOSYS; } -static struct page *new_vma_page(struct page *page, unsigned long private, int **x) +static struct page *new_page(struct page *page, unsigned long start, int **x) { return NULL; } @@ -1197,7 +1195,6 @@ static long do_mbind(unsigned long start, unsigned long len, unsigned short mode, unsigned short mode_flags, nodemask_t *nmask, unsigned long flags) { - struct vm_area_struct *vma; struct mm_struct *mm = current->mm; struct mempolicy *new; unsigned long end; @@ -1263,11 +1260,9 @@ static long do_mbind(unsigned long start, unsigned long len, if (err) goto mpol_out; - vma = check_range(mm, start, end, nmask, + err = check_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); - - err = PTR_ERR(vma); /* maybe ... */ - if (!IS_ERR(vma)) + if (!err) err = mbind_range(mm, start, end, new); if (!err) { @@ -1275,8 +1270,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); - nr_failed = migrate_pages(&pagelist, new_vma_page, - (unsigned long)vma, + nr_failed = migrate_pages(&pagelist, new_page, start, false, MIGRATE_SYNC, MR_MEMPOLICY_MBIND); if (nr_failed) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7708f66..cea283e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5613,54 +5613,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) * @end_bitidx: The last bit of interest * returns pageblock_bits flags */ -unsigned long get_pageblock_flags_group(struct page *page, - int start_bitidx, int end_bitidx) +unsigned long get_pageblock_flags_mask(struct page *page, + unsigned long end_bitidx, + unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx; - unsigned long flags = 0; - unsigned long value = 1; + unsigned long pfn, bitidx, word_bitidx; + unsigned long word; zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; + bitidx &= (BITS_PER_LONG-1); - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) - if (test_bit(bitidx + start_bitidx, bitmap)) - flags |= value; - - return flags; + word = bitmap[word_bitidx]; + bitidx += end_bitidx; + return (word >> (BITS_PER_LONG - bitidx - 1)) & mask; } /** - * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages + * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages * @page: The page within the block of interest * @start_bitidx: The first bit of interest * @end_bitidx: The last bit of interest * @flags: The flags to set */ -void set_pageblock_flags_group(struct page *page, unsigned long flags, - int start_bitidx, int end_bitidx) +void set_pageblock_flags_mask(struct page *page, unsigned long flags, + unsigned long end_bitidx, + unsigned long mask) { struct zone *zone; unsigned long *bitmap; - unsigned long pfn, bitidx; - unsigned long value = 1; + unsigned long pfn, 
bitidx, word_bitidx; + unsigned long old_word, word; + + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); zone = page_zone(page); pfn = page_to_pfn(page); bitmap = get_pageblock_bitmap(zone, pfn); bitidx = pfn_to_bitidx(zone, pfn); + word_bitidx = bitidx / BITS_PER_LONG; + bitidx &= (BITS_PER_LONG-1); VM_BUG_ON(pfn < zone->zone_start_pfn); VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) - if (flags & value) - __set_bit(bitidx + start_bitidx, bitmap); - else - __clear_bit(bitidx + start_bitidx, bitmap); + bitidx += end_bitidx; + mask <<= (BITS_PER_LONG - bitidx - 1); + flags <<= (BITS_PER_LONG - bitidx - 1); + + word = ACCESS_ONCE(bitmap[word_bitidx]); + for (;;) { + old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); + if (word == old_word) + break; + word = old_word; + } } /* diff --git a/mm/rmap.c b/mm/rmap.c index 770320b..f350f9f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. */ + might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); @@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page) * above cannot corrupt). */ if (!page_mapped(page)) { + rcu_read_unlock(); put_anon_vma(anon_vma); - anon_vma = NULL; + return NULL; } out: rcu_read_unlock(); @@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page) } if (!page_mapped(page)) { + rcu_read_unlock(); put_anon_vma(anon_vma); - anon_vma = NULL; - goto out; + return NULL; } /* we pinned the anon_vma, its safe to sleep */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 9dcc836..cfd005c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2226,10 +2226,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) for (i = 0; i <= ZONE_NORMAL; i++) { zone = &pgdat->node_zones[i]; + if (!populated_zone(zone)) + continue; + pfmemalloc_reserve += min_wmark_pages(zone); free_pages += zone_page_state(zone, NR_FREE_PAGES); } + /* If there are no reserves (unexpected config) then do not throttle */ + if (!pfmemalloc_reserve) + return true; + wmark_ok = free_pages > pfmemalloc_reserve / 2; /* kswapd must be awake if processes are being throttled */ @@ -2254,9 +2261,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat) static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, nodemask_t *nodemask) { + struct zoneref *z; struct zone *zone; - int high_zoneidx = gfp_zone(gfp_mask); - pg_data_t *pgdat; + pg_data_t *pgdat = NULL; /* * Kernel threads should not be throttled as they may be indirectly @@ -2275,10 +2282,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist, if (fatal_signal_pending(current)) goto out; - /* Check if the pfmemalloc reserves are ok */ - first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone); - pgdat = zone->zone_pgdat; - if (pfmemalloc_watermark_ok(pgdat)) + /* + * Check if the pfmemalloc reserves are ok by finding the first node + * with a usable ZONE_NORMAL or lower zone. The expectation is that + * GFP_KERNEL will be required for allocating network buffers when + * swapping over the network so ZONE_HIGHMEM is unusable. + * + * Throttling is based on the first usable node and throttled processes + * wait on a queue until kswapd makes progress and wakes them. 
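[Editorial aside on the set_pageblock_flags_mask() hunk above: the per-bit __set_bit()/__clear_bit() loop is replaced by a single lock-free read-modify-write of the containing bitmap word. The retry pattern in generic form, a sketch only; addr, mask and newbits are placeholders, not kernel symbols.]

	unsigned long word, old;

	word = ACCESS_ONCE(*addr);		/* snapshot the bitmap word */
	for (;;) {
		old = cmpxchg(addr, word, (word & ~mask) | newbits);
		if (old == word)
			break;			/* nobody raced us; update landed */
		word = old;			/* lost the race; retry on fresh value */
	}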
There + * is an affinity then between processes waking up and where reclaim + * progress has been made assuming the process wakes on the same node. + * More importantly, processes running on remote nodes will not compete + * for remote pfmemalloc reserves and processes on different nodes + * should make reasonable progress. + */ + for_each_zone_zonelist_nodemask(zone, z, zonelist, + gfp_mask, nodemask) { + if (zone_idx(zone) > ZONE_NORMAL) + continue; + + /* Throttle based on the first usable node */ + pgdat = zone->zone_pgdat; + if (pfmemalloc_watermark_ok(pgdat)) + goto out; + break; + } + + /* If no zone was usable by the allocation flags then do not throttle */ + if (!pgdat) goto out; /* Account for the throttling */ @@ -3025,7 +3056,10 @@ static int kswapd(void *p) } } + tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); current->reclaim_state = NULL; + lockdep_clear_current_reclaim_state(); + return 0; } diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 4925a02..947a965 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -643,14 +643,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { struct hci_cp_auth_requested cp; - /* encrypt must be pending if auth is also pending */ - set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); - cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); - if (conn->key_type != 0xff) + + /* If we're already encrypted set the REAUTH_PEND flag, + * otherwise set the ENCRYPT_PEND. + */ + if (conn->link_mode & HCI_LM_ENCRYPT) set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); + else + set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); } return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index ba6f399..86cd8f5 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -55,6 +55,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); + hci_dev_lock(hdev); + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + hci_dev_unlock(hdev); + hci_conn_check_pending(hdev); } @@ -3573,8 +3577,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with - * confirm_hint set to 1). */ - if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { + * confirm_hint set to 1). The exception is if neither + * side had MITM in which case we do auto-accept. 
+ */ + if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && + (loc_mitm || rem_mitm)) { BT_DBG("Confirming auto-accept as acceptor"); confirm_hint = 1; goto confirm; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1bcfb84..a166b26 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -625,11 +625,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, /*change security for LE channels */ if (chan->scid == L2CAP_CID_LE_DATA) { - if (!conn->hcon->out) { - err = -EINVAL; - break; - } - if (smp_conn_security(conn->hcon, sec.level)) break; sk->sk_state = BT_CONFIG; @@ -943,13 +938,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { BT_DBG("backlog full %d", parent->sk_ack_backlog); + release_sock(parent); return NULL; } sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); - if (!sk) + if (!sk) { + release_sock(parent); return NULL; + } bt_sock_reclassify_lock(sk, BTPROTO_L2CAP); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d8ba861..e470516 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2048,8 +2048,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, } if (type == BDADDR_LE_PUBLIC || type == BDADDR_LE_RANDOM) { - /* Continue with pairing via SMP */ + /* Continue with pairing via SMP. The hdev lock must be + * released as SMP may try to recquire it for crypto + * purposes. + */ + hci_dev_unlock(hdev); err = smp_user_confirm_reply(conn, mgmt_op, passkey); + hci_dev_lock(hdev); if (!err) err = cmd_status(sk, hdev->id, mgmt_op, diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 5abefb1..ba765a4 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -464,7 +464,7 @@ static void random_work(struct work_struct *work) hci_le_start_enc(hcon, ediv, rand, stk); hcon->enc_key_size = smp->enc_key_size; } else { - u8 stk[16], r[16], rand[8]; + u8 stk[16], r[16], rand[8], auth; __le16 ediv; memset(rand, 0, sizeof(rand)); @@ -479,8 +479,13 @@ static void random_work(struct work_struct *work) memset(stk + smp->enc_key_size, 0, SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); + if (hcon->pending_sec_level == BT_SECURITY_HIGH) + auth = 1; + else + auth = 0; + hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type, - HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size, + HCI_SMP_STK_SLAVE, 0, auth, stk, smp->enc_key_size, ediv, rand); } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index cbde5cc..8736537 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -34,8 +34,7 @@ static ssize_t ieee80211_if_read( ssize_t ret = -EINVAL; read_lock(&dev_base_lock); - if (sdata->dev->reg_state == NETREG_REGISTERED) - ret = (*format)(sdata, buf, sizeof(buf)); + ret = (*format)(sdata, buf, sizeof(buf)); read_unlock(&dev_base_lock); if (ret >= 0) @@ -62,8 +61,7 @@ static ssize_t ieee80211_if_write( ret = -ENODEV; rtnl_lock(); - if (sdata->dev->reg_state == NETREG_REGISTERED) - ret = (*write)(sdata, buf, count); + ret = (*write)(sdata, buf, count); rtnl_unlock(); return ret; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 217064a..808628a 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1091,6 +1091,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->u.ibss.privacy = params->privacy; sdata->u.ibss.control_port = params->control_port; sdata->u.ibss.basic_rates = 
params->basic_rates; + sdata->u.ibss.last_scan_completed = jiffies; memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate, sizeof(params->mcast_rate)); diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 4bc2aaf..e40b77d 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -486,6 +486,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data) return i->status & IPS_NAT_MASK ? 1 : 0; } +static int nf_nat_proto_clean(struct nf_conn *ct, void *data) +{ + struct nf_conn_nat *nat = nfct_nat(ct); + + if (nf_nat_proto_remove(ct, data)) + return 1; + + if (!nat || !nat->ct) + return 0; + + /* This netns is being destroyed, and conntrack has nat null binding. + * Remove it from bysource hash, as the table will be freed soon. + * + * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() + * will delete entry from already-freed table. + */ + if (!del_timer(&ct->timeout)) + return 1; + + spin_lock_bh(&nf_nat_lock); + hlist_del_rcu(&nat->bysource); + ct->status &= ~IPS_NAT_DONE_MASK; + nat->ct = NULL; + spin_unlock_bh(&nf_nat_lock); + + add_timer(&ct->timeout); + + /* don't delete conntrack. Although that would make things a lot + * simpler, we'd end up flushing all conntracks on nat rmmod. + */ + return 0; +} + static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) { struct nf_nat_proto_clean clean = { @@ -748,7 +781,7 @@ static void __net_exit nf_nat_net_exit(struct net *net) { struct nf_nat_proto_clean clean = {}; - nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean); + nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); synchronize_rcu(); nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index ca71056..9088c68 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -723,6 +723,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) svc_add_new_temp_xprt(serv, newxpt); + else + module_put(xprt->xpt_class->xcl_owner); } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { /* XPT_DATA|XPT_DEFERRED case: */ dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h index 9d1421e..49b582a 100644 --- a/scripts/recordmcount.h +++ b/scripts/recordmcount.h @@ -163,11 +163,11 @@ static int mcount_adjust = 0; static int MIPS_is_fake_mcount(Elf_Rel const *rp) { - static Elf_Addr old_r_offset; + static Elf_Addr old_r_offset = ~(Elf_Addr)0; Elf_Addr current_r_offset = _w(rp->r_offset); int is_fake; - is_fake = old_r_offset && + is_fake = (old_r_offset != ~(Elf_Addr)0) && (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET); old_r_offset = current_r_offset; diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index eb54845..1db13b3 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -274,12 +274,20 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name, * @xattr_value: pointer to the new extended attribute value * @xattr_value_len: pointer to the new extended attribute value length * - * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that - * the current value is valid. + * Before allowing the 'security.evm' protected xattr to be updated, + * verify the existing value is valid. 
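[Editorial aside, a usage-level note on the evm_inode_setxattr() hardening described above; the path below is made up and the call is shown schematically.]

/* Userspace view of the new check (illustrative values only):
 *
 *	value[0] = EVM_XATTR_HMAC;
 *	setxattr("/some/file", "security.evm", value, len, 0);
 *		=> now fails with -EPERM
 *
 * Digital-signature values (type EVM_IMA_XATTR_DIGSIG) are still accepted
 * and continue through the usual evm_protect_xattr() checks.
 */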
As only the kernel should have + access to the EVM encrypted key needed to calculate the HMAC, prevent + userspace from writing HMAC value. Writing 'security.evm' requires + CAP_SYS_ADMIN privileges. */ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { + const struct evm_ima_xattr_data *xattr_data = xattr_value; + + if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0) + && (xattr_data->type == EVM_XATTR_HMAC)) + return -EPERM; return evm_protect_xattr(dentry, xattr_name, xattr_value, xattr_value_len); } diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index b21ee5b..bb65b0d 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -21,6 +21,36 @@ #include <linux/slab.h> #include "ima.h" +/** + * ima_kernel_read - read file content + * + * This is a function for reading file content instead of kernel_read(). + * It does not perform locking checks to ensure it cannot be blocked. + * It does not perform security checks because it is irrelevant for IMA. + * + */ +static int ima_kernel_read(struct file *file, loff_t offset, + char *addr, unsigned long count) +{ + mm_segment_t old_fs; + char __user *buf = addr; + ssize_t ret; + + if (!(file->f_mode & FMODE_READ)) + return -EBADF; + if (!file->f_op->read && !file->f_op->aio_read) + return -EINVAL; + + old_fs = get_fs(); + set_fs(get_ds()); + if (file->f_op->read) + ret = file->f_op->read(file, buf, count, &offset); + else + ret = do_sync_read(file, buf, count, &offset); + set_fs(old_fs); + return ret; +} + static int init_desc(struct hash_desc *desc) { int rc; @@ -67,7 +97,7 @@ int ima_calc_hash(struct file *file, char *digest) while (offset < i_size) { int rbuf_len; - rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE); + rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE); if (rbuf_len < 0) { rc = rbuf_len; break; diff --git a/sound/core/control.c b/sound/core/control.c index 8c7c2c9..6919da7 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -289,6 +289,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card, { struct snd_kcontrol *kctl; + /* Make sure that the ids assigned to the control do not wrap around */ + if (card->last_numid >= UINT_MAX - count) + card->last_numid = 0; + list_for_each_entry(kctl, &card->controls, list) { if (kctl->id.numid < card->last_numid + 1 + count && kctl->id.numid + kctl->count > card->last_numid + 1) { @@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) { struct snd_ctl_elem_id id; unsigned int idx; + unsigned int count; int err = -EINVAL; if (!
kcontrol) @@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) if (snd_BUG_ON(!card || !kcontrol->info)) goto error; id = kcontrol->id; + if (id.index > UINT_MAX - kcontrol->count) + goto error; + down_write(&card->controls_rwsem); if (snd_ctl_find_id(card, &id)) { up_write(&card->controls_rwsem); @@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol) card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; + count = kcontrol->count; up_write(&card->controls_rwsem); - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) + for (idx = 0; idx < count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; @@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol, bool add_on_replace) { struct snd_ctl_elem_id id; + unsigned int count; unsigned int idx; struct snd_kcontrol *old; int ret; @@ -423,8 +433,9 @@ add: card->controls_count += kcontrol->count; kcontrol->id.numid = card->last_numid + 1; card->last_numid += kcontrol->count; + count = kcontrol->count; up_write(&card->controls_rwsem); - for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++) + for (idx = 0; idx < count; idx++, id.index++, id.numid++) snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id); return 0; @@ -895,9 +906,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file, result = kctl->put(kctl, control); } if (result > 0) { + struct snd_ctl_elem_id id = control->id; up_read(&card->controls_rwsem); - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, - &control->id); + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id); return 0; } } @@ -989,6 +1000,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file, struct user_element { struct snd_ctl_elem_info info; + struct snd_card *card; void *elem_data; /* element data */ unsigned long elem_data_size; /* size of element data in bytes */ void *tlv_data; /* TLV data */ @@ -1032,7 +1044,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol, { struct user_element *ue = kcontrol->private_data; + mutex_lock(&ue->card->user_ctl_lock); memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size); + mutex_unlock(&ue->card->user_ctl_lock); return 0; } @@ -1041,10 +1055,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol, { int change; struct user_element *ue = kcontrol->private_data; - + + mutex_lock(&ue->card->user_ctl_lock); change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0; if (change) memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size); + mutex_unlock(&ue->card->user_ctl_lock); return change; } @@ -1064,19 +1080,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol, new_data = memdup_user(tlv, size); if (IS_ERR(new_data)) return PTR_ERR(new_data); + mutex_lock(&ue->card->user_ctl_lock); change = ue->tlv_data_size != size; if (!change) change = memcmp(ue->tlv_data, new_data, size); kfree(ue->tlv_data); ue->tlv_data = new_data; ue->tlv_data_size = size; + mutex_unlock(&ue->card->user_ctl_lock); } else { - if (! ue->tlv_data_size || ! 
ue->tlv_data) - return -ENXIO; - if (size < ue->tlv_data_size) - return -ENOSPC; + int ret = 0; + + mutex_lock(&ue->card->user_ctl_lock); + if (!ue->tlv_data_size || !ue->tlv_data) { + ret = -ENXIO; + goto err_unlock; + } + if (size < ue->tlv_data_size) { + ret = -ENOSPC; + goto err_unlock; + } if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size)) - return -EFAULT; + ret = -EFAULT; +err_unlock: + mutex_unlock(&ue->card->user_ctl_lock); + if (ret) + return ret; } return change; } @@ -1134,8 +1163,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, struct user_element *ue; int idx, err; - if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS) - return -ENOMEM; if (info->count < 1) return -EINVAL; access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE : @@ -1144,21 +1171,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE)); info->id.numid = 0; memset(&kctl, 0, sizeof(kctl)); - down_write(&card->controls_rwsem); - _kctl = snd_ctl_find_id(card, &info->id); - err = 0; - if (_kctl) { - if (replace) - err = snd_ctl_remove(card, _kctl); - else - err = -EBUSY; - } else { - if (replace) - err = -ENOENT; + + if (replace) { + err = snd_ctl_remove_user_ctl(file, &info->id); + if (err) + return err; } - up_write(&card->controls_rwsem); - if (err < 0) - return err; + + if (card->user_ctl_count >= MAX_USER_CONTROLS) + return -ENOMEM; + memcpy(&kctl.id, &info->id, sizeof(info->id)); kctl.count = info->owner ? info->owner : 1; access |= SNDRV_CTL_ELEM_ACCESS_USER; @@ -1208,6 +1230,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file, ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL); if (ue == NULL) return -ENOMEM; + ue->card = card; ue->info = *info; ue->info.access = 0; ue->elem_data = (char *)ue + sizeof(*ue); @@ -1319,8 +1342,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file, } err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv); if (err > 0) { + struct snd_ctl_elem_id id = kctl->id; up_read(&card->controls_rwsem); - snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id); + snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id); return 0; } } else { diff --git a/sound/core/init.c b/sound/core/init.c index 7b012d1..41020d7 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -208,6 +208,7 @@ int snd_card_create(int idx, const char *xid, INIT_LIST_HEAD(&card->devices); init_rwsem(&card->controls_rwsem); rwlock_init(&card->ctl_files_rwlock); + mutex_init(&card->user_ctl_lock); INIT_LIST_HEAD(&card->controls); INIT_LIST_HEAD(&card->ctl_files); spin_lock_init(&card->files_lock); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6cc07f0..8609cf2 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7282,6 +7282,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, + { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 }, { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 }, diff --git a/sound/usb/card.c b/sound/usb/card.c index 3e4565f..3e42e2b 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -289,6 +289,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) static int 
snd_usb_audio_free(struct snd_usb_audio *chip) { + struct list_head *p, *n; + + list_for_each_safe(p, n, &chip->ep_list) + snd_usb_endpoint_free(p); + mutex_destroy(&chip->mutex); kfree(chip); return 0; @@ -564,7 +569,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, struct snd_usb_audio *chip) { struct snd_card *card; - struct list_head *p, *n; + struct list_head *p; if (chip == (void *)-1L) return; @@ -577,14 +582,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, mutex_lock(&register_mutex); chip->num_interfaces--; if (chip->num_interfaces <= 0) { + struct snd_usb_endpoint *ep; + snd_card_disconnect(card); /* release the pcm resources */ list_for_each(p, &chip->pcm_list) { snd_usb_stream_disconnect(p); } /* release the endpoint resources */ - list_for_each_safe(p, n, &chip->ep_list) { - snd_usb_endpoint_free(p); + list_for_each_entry(ep, &chip->ep_list, list) { + snd_usb_endpoint_release(ep); } /* release the midi resources */ list_for_each(p, &chip->midi_list) { diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index f46a4e0..a86ccd7 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -950,19 +950,30 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep) } /** + * snd_usb_endpoint_release: Tear down an snd_usb_endpoint + * + * @ep: the endpoint to release + * + * This function does not care for the endpoint's use count but will tear + * down all the streaming URBs immediately. + */ +void snd_usb_endpoint_release(struct snd_usb_endpoint *ep) +{ + release_urbs(ep, 1); +} + +/** * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint * * @ep: the list header of the endpoint to free * - * This function does not care for the endpoint's use count but will tear - * down all the streaming URBs immediately and free all resources. + * This frees all resources of the given ep. */ void snd_usb_endpoint_free(struct list_head *head) { struct snd_usb_endpoint *ep; ep = list_entry(head, struct snd_usb_endpoint, list); - release_urbs(ep, 1); kfree(ep); } diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h index 447902d..b21f844 100644 --- a/sound/usb/endpoint.h +++ b/sound/usb/endpoint.h @@ -21,6 +21,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep); void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); +void snd_usb_endpoint_release(struct snd_usb_endpoint *ep); void snd_usb_endpoint_free(struct list_head *head); int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); -- To unsubscribe from this list: send the line "unsubscribe stable" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html
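
A note on the sound/core/control.c hunks above: snd_ctl_remove_numid_conflict(), snd_ctl_add() and snd_ctl_elem_add() now guard the unsigned numid/index arithmetic so that handing out kcontrol->count new ids can never wrap past UINT_MAX. The stand-alone C sketch below is an illustrative aside rather than part of the patch; the helper name id_range_would_wrap is invented for the example, but the comparison mirrors the "id.index > UINT_MAX - kcontrol->count" test added in the patch.

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helper: reject a request for `count` consecutive ids
     * starting at `base` when `base + count` would wrap around UINT_MAX,
     * the same guard the patch applies before assigning control ids. */
    static bool id_range_would_wrap(unsigned int base, unsigned int count)
    {
            return base > UINT_MAX - count;
    }

    int main(void)
    {
            printf("%d\n", id_range_would_wrap(UINT_MAX - 1u, 4u)); /* 1: would wrap */
            printf("%d\n", id_range_would_wrap(100u, 4u));          /* 0: safe */
            return 0;
    }

The point of writing the check this way is that the subtraction UINT_MAX - count cannot itself overflow for any unsigned count, so the comparison is safe even for the largest requests.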