diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl index f2413ac..ac60e03 100644 --- a/Documentation/DocBook/media_api.tmpl +++ b/Documentation/DocBook/media_api.tmpl @@ -1,6 +1,6 @@ <?xml version="1.0"?> -<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN" - "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [ +<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" + "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [ <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities; <!ENTITY media-indices SYSTEM "./media-indices.tmpl"> diff --git a/Makefile b/Makefile index 5eba017..d4b678c 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 13 -EXTRAVERSION = .9 +EXTRAVERSION = .10 NAME = Remoralised Urchins Update # *DOCUMENTATION* diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h index ec08740..6f938cc 100644 --- a/arch/arm/mach-versatile/include/mach/platform.h +++ b/arch/arm/mach-versatile/include/mach/platform.h @@ -231,12 +231,14 @@ /* PCI space */ #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */ #define VERSATILE_PCI_CFG_BASE 0x42000000 +#define VERSATILE_PCI_IO_BASE 0x43000000 #define VERSATILE_PCI_MEM_BASE0 0x44000000 #define VERSATILE_PCI_MEM_BASE1 0x50000000 #define VERSATILE_PCI_MEM_BASE2 0x60000000 /* Sizes of above maps */ #define VERSATILE_PCI_BASE_SIZE 0x01000000 #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000 +#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000 #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */ #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */ #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */ diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index e92e5e0..c97be4e 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -43,9 +43,9 @@ #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0) #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4) #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8) -#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10) -#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14) -#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18) +#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14) +#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18) +#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c) #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc) #define DEVICE_ID_OFFSET 0x00 @@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = { .write = versatile_write_config, }; -static struct resource io_mem = { - .name = "PCI I/O space", +static struct resource unused_mem = { + .name = "PCI unused", .start = VERSATILE_PCI_MEM_BASE0, .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1, .flags = IORESOURCE_MEM, @@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) { int ret = 0; - ret = request_resource(&iomem_resource, &io_mem); + ret = request_resource(&iomem_resource, &unused_mem); if (ret) { - printk(KERN_ERR "PCI: unable to allocate I/O " + printk(KERN_ERR "PCI: unable to allocate unused " "memory region (%d)\n", ret); goto out; } @@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) if (ret) { printk(KERN_ERR "PCI: unable to allocate non-prefetchable " "memory region (%d)\n", ret); - goto release_io_mem; + goto 
release_unused_mem; } ret = request_resource(&iomem_resource, &pre_mem); if (ret) { @@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys) release_non_mem: release_resource(&non_mem); - release_io_mem: - release_resource(&io_mem); + release_unused_mem: + release_resource(&unused_mem); out: return ret; } @@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) goto out; } - ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0); + ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE); if (ret) goto out; @@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); /* + * For many years the kernel and QEMU were symbiotically buggy + * in that they both assumed the same broken IRQ mapping. + * QEMU therefore attempts to auto-detect old broken kernels + * so that they still work on newer QEMU as they did on old + * QEMU. Since we now use the correct (ie matching-hardware) + * IRQ mapping we write a definitely different value to a + * PCI_INTERRUPT_LINE register to tell QEMU that we expect + * real hardware behaviour and it need not be backwards + * compatible for us. This write is harmless on real hardware. + */ + __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE); + + /* * Do not to map Versatile FPGA PCI device into memory space */ pci_slot_ignore |= (1 << myslot); @@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; - /* slot, pin, irq - * 24 1 IRQ_SIC_PCI0 - * 25 1 IRQ_SIC_PCI1 - * 26 1 IRQ_SIC_PCI2 - * 27 1 IRQ_SIC_PCI3 + /* + * Slot INTA INTB INTC INTD + * 31 PCI1 PCI2 PCI3 PCI0 + * 30 PCI0 PCI1 PCI2 PCI3 + * 29 PCI3 PCI0 PCI1 PCI2 */ - irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3); + irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3); return irq; } diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 34e1bca..a98ba4fd 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events, if (is_software_event(event)) return 1; - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, &fake_event) >= 0; @@ -781,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] /* * PMXEVTYPER: Event selection reg */ -#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ +#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ /* diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index 579f452..300d7d3 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void) ath79_ahb_clk.rate = freq / t; } - ath79_wdt_clk.rate = ath79_ref_clk.rate; + ath79_wdt_clk.rate = ath79_ahb_clk.rate; ath79_uart_clk.rate = ath79_ref_clk.rate; } diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index ee5b690..52e5758 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs) nb = aligninfo[instr].len; flags = aligninfo[instr].flags; + /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ + if 
(IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { + nb = 8; + flags = LD+SW; + } else if (IS_XFORM(instruction) && + ((instruction >> 1) & 0x3ff) == 660) { + nb = 8; + flags = ST+SW; + } + /* Byteswap little endian loads and stores */ swiz = 0; if (regs->msr & MSR_LE) { diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index ca55882..28f6ccd 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -353,7 +353,7 @@ static int alloc_dispatch_log_kmem_cache(void) } early_initcall(alloc_dispatch_log_kmem_cache); -static void pSeries_idle(void) +static void pseries_lpar_idle(void) { /* This would call on the cpuidle framework, and the back-end pseries * driver to go to idle states @@ -361,10 +361,22 @@ static void pSeries_idle(void) if (cpuidle_idle_call()) { /* On error, execute default handler * to go into low thread priority and possibly - * low power mode. + * low power mode by cedeing processor to hypervisor */ - HMT_low(); - HMT_very_low(); + + /* Indicate to hypervisor that we are idle. */ + get_lppaca()->idle = 1; + + /* + * Yield the processor to the hypervisor. We return if + * an external interrupt occurs (which are driven prior + * to returning here) or if a prod occurs from another + * processor. When returning here, external interrupts + * are enabled. + */ + cede_processor(); + + get_lppaca()->idle = 0; } } @@ -453,15 +465,14 @@ static void __init pSeries_setup_arch(void) pSeries_nvram_init(); - if (firmware_has_feature(FW_FEATURE_SPLPAR)) { + if (firmware_has_feature(FW_FEATURE_LPAR)) { vpa_init(boot_cpuid); - ppc_md.power_save = pSeries_idle; - } - - if (firmware_has_feature(FW_FEATURE_LPAR)) + ppc_md.power_save = pseries_lpar_idle; ppc_md.enable_pmcs = pseries_lpar_enable_pmcs; - else + } else { + /* No special idle routine */ ppc_md.enable_pmcs = power4_enable_pmcs; + } if (firmware_has_feature(FW_FEATURE_SET_MODE)) { long rc; diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index a1daf4a..fb53906 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -467,7 +467,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); + compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h index 46fc474..f50de69 100644 --- a/arch/x86/include/asm/checksum_32.h +++ b/arch/x86/include/asm/checksum_32.h @@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, int len, __wsum sum, int *err_ptr) { + __wsum ret; + might_sleep(); - return csum_partial_copy_generic((__force void *)src, dst, - len, sum, err_ptr, NULL); + stac(); + ret = csum_partial_copy_generic((__force void *)src, dst, + len, sum, err_ptr, NULL); + clac(); + + return ret; } /* @@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src, int len, __wsum sum, int *err_ptr) { + __wsum ret; + might_sleep(); - if (access_ok(VERIFY_WRITE, dst, len)) - return csum_partial_copy_generic(src, (__force void *)dst, - len, sum, NULL, err_ptr); + if (access_ok(VERIFY_WRITE, dst, len)) { + stac(); + ret = csum_partial_copy_generic(src, (__force void *)dst, + len, sum, NULL, err_ptr); + clac(); + return ret; + } if (len) *err_ptr = 
-EFAULT; diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index d6bf1f3..1697f73 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -364,7 +364,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - err |= __save_altstack(&frame->uc.uc_stack, regs->sp); + save_altstack_ex(&frame->uc.uc_stack, regs->sp); /* Set up to return from userspace. */ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); @@ -429,7 +429,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - err |= __save_altstack(&frame->uc.uc_stack, regs->sp); + save_altstack_ex(&frame->uc.uc_stack, regs->sp); /* Set up to return from userspace. If provided, use a stub already in userspace. */ @@ -496,7 +496,7 @@ static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, else put_user_ex(0, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); - err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); + compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); if (ka->sa.sa_flags & SA_RESTORER) { diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 20a4fd4..db141f2 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -7,8 +7,7 @@ * kernel and insert a module (lg.ko) which allows us to run other Linux * kernels the same way we'd run processes. We call the first kernel the Host, * and the others the Guests. The program which sets up and configures Guests - * (such as the example in Documentation/virtual/lguest/lguest.c) is called the - * Launcher. + * (such as the example in tools/lguest/lguest.c) is called the Launcher. * * Secondly, we only run specially modified Guests, not normal kernels: setting * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c index 25b7ae8..7609e0e 100644 --- a/arch/x86/lib/csum-wrappers_64.c +++ b/arch/x86/lib/csum-wrappers_64.c @@ -6,6 +6,7 @@ */ #include <asm/checksum.h> #include <linux/module.h> +#include <asm/smap.h> /** * csum_partial_copy_from_user - Copy and checksum from user space. @@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst, len -= 2; } } + stac(); isum = csum_partial_copy_generic((__force const void *)src, dst, len, isum, errp, NULL); + clac(); if (unlikely(*errp)) goto out_err; @@ -82,6 +85,8 @@ __wsum csum_partial_copy_to_user(const void *src, void __user *dst, int len, __wsum isum, int *errp) { + __wsum ret; + might_sleep(); if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { @@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst, } *errp = 0; - return csum_partial_copy_generic(src, (void __force *)dst, - len, isum, NULL, errp); + stac(); + ret = csum_partial_copy_generic(src, (void __force *)dst, + len, isum, NULL, errp); + clac(); + return ret; } EXPORT_SYMBOL(csum_partial_copy_to_user); diff --git a/crypto/api.c b/crypto/api.c index 3b61803..37c4c72 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem); BLOCKING_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { return try_module_get(alg->cra_module) ? 
crypto_alg_get(alg) : NULL; @@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, } up_write(&crypto_alg_sem); - if (alg != &larval->alg) + if (alg != &larval->alg) { kfree(larval); + if (crypto_is_larval(alg)) + alg = crypto_larval_wait(alg); + } return alg; } diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index a563e11..0b58c9d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -983,6 +983,14 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { ec_enlarge_storm_threshold, "CLEVO hardware", { DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, + { + ec_skip_dsdt_scan, "HP Folio 13", { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, + { + ec_validate_ecdt, "ASUS hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, {}, }; diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c index 16ed068..917a3ab 100644 --- a/drivers/clk/clk-wm831x.c +++ b/drivers/clk/clk-wm831x.c @@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev) if (!clkdata) return -ENOMEM; + clkdata->wm831x = wm831x; + /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2); if (ret < 0) { diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 2a297f8..f8a8636 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -106,6 +106,7 @@ struct cpuidle_coupled { cpumask_t coupled_cpus; int requested_state[NR_CPUS]; atomic_t ready_waiting_counts; + atomic_t abort_barrier; int online_count; int refcnt; int prevent; @@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock); static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); /* - * The cpuidle_coupled_poked_mask mask is used to avoid calling + * The cpuidle_coupled_poke_pending mask is used to avoid calling * __smp_call_function_single with the per cpu call_single_data struct already * in use. This prevents a deadlock where two cpus are waiting for each others * call_single_data struct to be available */ -static cpumask_t cpuidle_coupled_poked_mask; +static cpumask_t cpuidle_coupled_poke_pending; + +/* + * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked + * once to minimize entering the ready loop with a poke pending, which would + * require aborting and retrying. 
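The comment above introduces a second mask alongside the "poke pending" one: cpuidle_coupled_poke_pending tracks an IPI that has been queued but not yet handled, while cpuidle_coupled_poked records that a cpu has processed at least one poke. A rough stand-alone sketch of that bookkeeping, using plain bitmasks and invented helper names rather than cpumask_t and the kernel's atomics:

#include <stdint.h>
#include <stdio.h>

/* Toy model: bit N represents CPU N.  The real code uses cpumask_t. */
static uint32_t poke_pending;   /* an IPI has been queued but not yet handled */
static uint32_t poked;          /* the cpu has handled at least one poke */

/* Caller side: queue a poke unless one is already in flight, so the
 * per-cpu call_single_data struct is never reused while still busy. */
static int poke(int cpu)
{
	if (poke_pending & (1u << cpu))
		return 0;               /* already pending, no new IPI */
	poke_pending |= 1u << cpu;
	return 1;                       /* would issue the cross-cpu call here */
}

/* IPI handler side: record that this cpu saw a poke, then clear "pending". */
static void handle_poke(int cpu)
{
	poked |= 1u << cpu;
	poke_pending &= ~(1u << cpu);
}

int main(void)
{
	poke(2);
	printf("pending=%x poked=%x\n", (unsigned)poke_pending, (unsigned)poked);
	handle_poke(2);
	printf("pending=%x poked=%x\n", (unsigned)poke_pending, (unsigned)poked);
	return 0;
}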
+ */ +static cpumask_t cpuidle_coupled_poked; /** * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus @@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev, return state; } -static void cpuidle_coupled_poked(void *info) +static void cpuidle_coupled_handle_poke(void *info) { int cpu = (unsigned long)info; - cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask); + cpumask_set_cpu(cpu, &cpuidle_coupled_poked); + cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending); } /** @@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu) { struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); - if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask)) + if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending)) __smp_call_function_single(cpu, csd, 0); } @@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu, * @coupled: the struct coupled that contains the current cpu * @next_state: the index in drv->states of the requested state for this cpu * - * Updates the requested idle state for the specified cpuidle device, - * poking all coupled cpus out of idle if necessary to let them see the new - * state. + * Updates the requested idle state for the specified cpuidle device. + * Returns the number of waiting cpus. */ -static void cpuidle_coupled_set_waiting(int cpu, +static int cpuidle_coupled_set_waiting(int cpu, struct cpuidle_coupled *coupled, int next_state) { - int w; - coupled->requested_state[cpu] = next_state; /* - * If this is the last cpu to enter the waiting state, poke - * all the other cpus out of their waiting state so they can - * enter a deeper state. This can race with one of the cpus - * exiting the waiting state due to an interrupt and - * decrementing waiting_count, see comment below. - * * The atomic_inc_return provides a write barrier to order the write * to requested_state with the later write that increments ready_count. */ - w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK; - if (w == coupled->online_count) - cpuidle_coupled_poke_others(cpu, coupled); + return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK; } /** @@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled) * been processed and the poke bit has been cleared. * * Other interrupts may also be processed while interrupts are enabled, so - * need_resched() must be tested after turning interrupts off again to make sure + * need_resched() must be tested after this function returns to make sure * the interrupt didn't schedule work that should take the cpu out of idle. * - * Returns 0 if need_resched was false, -EINTR if need_resched was true. + * Returns 0 if no poke was pending, 1 if a poke was cleared. */ static int cpuidle_coupled_clear_pokes(int cpu) { + if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) + return 0; + local_irq_enable(); - while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask)) + while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending)) cpu_relax(); local_irq_disable(); - return need_resched() ? 
-EINTR : 0; + return 1; +} + +static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled) +{ + cpumask_t cpus; + int ret; + + cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus); + ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus); + + return ret; } /** @@ -449,31 +461,56 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev, { int entered_state = -1; struct cpuidle_coupled *coupled = dev->coupled; + int w; if (!coupled) return -EINVAL; while (coupled->prevent) { - if (cpuidle_coupled_clear_pokes(dev->cpu)) { + cpuidle_coupled_clear_pokes(dev->cpu); + if (need_resched()) { local_irq_enable(); return entered_state; } entered_state = cpuidle_enter_state(dev, drv, dev->safe_state_index); + local_irq_disable(); } /* Read barrier ensures online_count is read after prevent is cleared */ smp_rmb(); - cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); +reset: + cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked); + + w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state); + /* + * If this is the last cpu to enter the waiting state, poke + * all the other cpus out of their waiting state so they can + * enter a deeper state. This can race with one of the cpus + * exiting the waiting state due to an interrupt and + * decrementing waiting_count, see comment below. + */ + if (w == coupled->online_count) { + cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked); + cpuidle_coupled_poke_others(dev->cpu, coupled); + } retry: /* * Wait for all coupled cpus to be idle, using the deepest state - * allowed for a single cpu. + * allowed for a single cpu. If this was not the poking cpu, wait + * for at least one poke before leaving to avoid a race where + * two cpus could arrive at the waiting loop at the same time, + * but the first of the two to arrive could skip the loop without + * processing the pokes from the last to arrive. */ - while (!cpuidle_coupled_cpus_waiting(coupled)) { - if (cpuidle_coupled_clear_pokes(dev->cpu)) { + while (!cpuidle_coupled_cpus_waiting(coupled) || + !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) { + if (cpuidle_coupled_clear_pokes(dev->cpu)) + continue; + + if (need_resched()) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } @@ -485,14 +522,22 @@ retry: entered_state = cpuidle_enter_state(dev, drv, dev->safe_state_index); + local_irq_disable(); } - if (cpuidle_coupled_clear_pokes(dev->cpu)) { + cpuidle_coupled_clear_pokes(dev->cpu); + if (need_resched()) { cpuidle_coupled_set_not_waiting(dev->cpu, coupled); goto out; } /* + * Make sure final poke status for this cpu is visible before setting + * cpu as ready. + */ + smp_wmb(); + + /* * All coupled cpus are probably idle. There is a small chance that * one of the other cpus just became active. Increment the ready count, * and spin until all coupled cpus have incremented the counter. Once a @@ -511,6 +556,28 @@ retry: cpu_relax(); } + /* + * Make sure read of all cpus ready is done before reading pending pokes + */ + smp_rmb(); + + /* + * There is a small chance that a cpu left and reentered idle after this + * cpu saw that all cpus were waiting. The cpu that reentered idle will + * have sent this cpu a poke, which will still be pending after the + * ready loop. The pending interrupt may be lost by the interrupt + * controller when entering the deep idle state. 
It's not possible to + * clear a pending interrupt without turning interrupts on and handling + * it, and it's too late to turn on interrupts here, so reset the + * coupled idle state of all cpus and retry. + */ + if (cpuidle_coupled_any_pokes_pending(coupled)) { + cpuidle_coupled_set_done(dev->cpu, coupled); + /* Wait for all cpus to see the pending pokes */ + cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier); + goto reset; + } + /* all cpus have acked the coupled state */ next_state = cpuidle_coupled_get_state(dev, coupled); @@ -596,7 +663,7 @@ have_coupled: coupled->refcnt++; csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); - csd->func = cpuidle_coupled_poked; + csd->func = cpuidle_coupled_handle_poke; csd->info = (void *)(unsigned long)dev->cpu; return 0; diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bcb2c0a..8663a83 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2464,7 +2464,22 @@ int drm_mode_getfb(struct drm_device *dev, r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; - fb->funcs->create_handle(fb, file_priv, &r->handle); + if (fb->funcs->create_handle) { + if (file_priv->is_master || capable(CAP_SYS_ADMIN)) { + ret = fb->funcs->create_handle(fb, file_priv, + &r->handle); + } else { + /* GET_FB() is an unprivileged ioctl so we must not + * return a buffer-handle to non-master processes! For + * backwards-compatibility reasons, we cannot make + * GET_FB() privileged, so just return an invalid handle + * for non-masters. */ + r->handle = 0; + ret = 0; + } + } else { + ret = -ENODEV; + } out: mutex_unlock(&dev->mode_config.mutex); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index dfd9ed3..60d6b35 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -125,6 +125,9 @@ static struct edid_quirk { /* ViewSonic VA2026w */ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + + /* Medion MD 30217 PG */ + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, }; /*** DDC fetch and block validation ***/ diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 7741c22..494fbbf 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -165,7 +165,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) max = intel_panel_get_max_backlight(dev); intel_panel_set_backlight(dev, bclp * max / 255); - iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); + iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 32501f6..1602398 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -50,7 +50,7 @@ static char *pre_emph_names[] = { * or from atom. Note that atom operates on * dw units. 
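On big-endian hosts the atom scratch area has to be byte-swapped per 32-bit word, which is why the helper is renamed and exported below so the i2c path can use it too. A minimal stand-alone sketch of that per-dword swap (an invented helper, not the driver's radeon_atom_copy_swap, and without the bounce buffers the real function uses for short copies):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Swap the bytes inside each 32-bit word of a buffer.  num_bytes is rounded
 * up to whole dwords, mirroring how atom operates on dw units, so the buffer
 * must be padded to a multiple of four bytes. */
static void swap_dwords(uint8_t *buf, unsigned num_bytes)
{
	unsigned words = (num_bytes + 3) / 4;
	unsigned i;

	for (i = 0; i < words; i++) {
		uint32_t v;

		memcpy(&v, buf + 4 * i, 4);
		v = (v >> 24) | ((v >> 8) & 0x0000ff00) |
		    ((v << 8) & 0x00ff0000) | (v << 24);
		memcpy(buf + 4 * i, &v, 4);
	}
}

int main(void)
{
	uint8_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	swap_dwords(buf, sizeof(buf));
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}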
*/ -static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) +void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) { #ifdef __BIG_ENDIAN u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ @@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); - radeon_copy_swap(base, send, send_bytes, true); + radeon_atom_copy_swap(base, send, send_bytes, true); args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); @@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, recv_bytes = recv_size; if (recv && recv_size) - radeon_copy_swap(recv, base + 16, recv_bytes, false); + radeon_atom_copy_swap(recv, base + 16, recv_bytes, false); return recv_bytes; } diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c index 082338d..2ca389d 100644 --- a/drivers/gpu/drm/radeon/atombios_i2c.c +++ b/drivers/gpu/drm/radeon/atombios_i2c.c @@ -27,6 +27,8 @@ #include "radeon.h" #include "atom.h" +extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); + #define TARGET_HW_I2C_CLOCK 50 /* these are a limitation of ProcessI2cChannelTransaction not the hw */ @@ -77,7 +79,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, } if (!(flags & HW_I2C_WRITE)) - memcpy(buf, base, num); + radeon_atom_copy_swap(buf, base, num, false); return 0; } diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index eeb1aad..c7a0488 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -724,7 +724,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -747,18 +748,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. 
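Later in this hunk the line-buffer change also programs the per-pipe DMIF buffer allocation and busy-waits for the hardware to acknowledge it. The general write-then-poll pattern looks roughly like the stand-alone sketch below; reg_read/reg_write/delay_us stand in for the driver's register accessors and udelay, and the bit layout follows the DMIF_BUFFERS_ALLOCATED()/_COMPLETED defines this patch adds:

#include <stdint.h>
#include <stdio.h>

#define DMIF_BUFFERS_ALLOCATED(x)          ((x) << 0)
#define DMIF_BUFFERS_ALLOCATED_COMPLETED   (1u << 4)

/* Stand-ins for the MMIO accessors; one fake register that "completes"
 * immediately by reflecting the request plus the acknowledge bit. */
static uint32_t fake_reg;
static void reg_write(uint32_t val) { fake_reg = val | DMIF_BUFFERS_ALLOCATED_COMPLETED; }
static uint32_t reg_read(void)      { return fake_reg; }
static void delay_us(unsigned us)   { (void)us; }

/* Request an allocation, then busy-wait (bounded) for the COMPLETED ack. */
static int set_dmif_buffers(unsigned buffer_alloc, unsigned timeout_us)
{
	unsigned i;

	reg_write(DMIF_BUFFERS_ALLOCATED(buffer_alloc));
	for (i = 0; i < timeout_us; i++) {
		if (reg_read() & DMIF_BUFFERS_ALLOCATED_COMPLETED)
			return 0;
		delay_us(1);
	}
	return -1;      /* hardware never acknowledged */
}

int main(void)
{
	printf("ack: %d\n", set_dmif_buffers(2, 100000));
	return 0;
}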
*/ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } /* second controller of the pair uses second half of the lb */ if (radeon_crtc->crtc_id % 2) tmp += 4; WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 0bfd0e9e..f410b4d 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -760,6 +760,10 @@ # define LATENCY_LOW_WATERMARK(x) ((x) << 0) # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 96168ef..6048f2b 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -715,13 +715,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); + u8 *num_dst_objs = (u8 *) + ((u8 *)router_src_dst_table + 1 + + (router_src_dst_table->ucNumberOfSrc * 2)); + u16 *dst_objs = (u16 *)(num_dst_objs + 1); int enum_id; router.router_id = router_obj_id; - for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; - enum_id++) { + for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { if (le16_to_cpu(path->usConnObjectId) == - le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) + le16_to_cpu(dst_objs[enum_id])) break; } @@ -1651,7 +1654,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct kfree(edid); } } - record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); + record += fake_edid_record->ucFakeEDIDLength ? 
+ fake_edid_record->ucFakeEDIDLength + 2 : + sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 4f3b4d5..ecd62b8 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -256,16 +256,18 @@ int radeon_irq_kms_init(struct radeon_device *rdev) dev_info(rdev->dev, "radeon: using MSI.\n"); } } + + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); + rdev->irq.installed = true; r = drm_irq_install(rdev->ddev); if (r) { rdev->irq.installed = false; + flush_work(&rdev->hotplug_work); return r; } - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); - DRM_INFO("radeon: irq initialized.\n"); return 0; } diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 233a9b9..b8074a8 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev) /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0, * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { - WREG32_MC(RS480_MC_MISC_CNTL, - (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } else { - WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } /* Enable gart */ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 075d52e..7742831 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -411,7 +411,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -426,16 +427,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. 
*/ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, DC_LB_MEMORY_CONFIG(tmp)); + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index e9a01f0..30c5dbc 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -62,6 +62,10 @@ #define DMIF_ADDR_CALC 0xC00 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define SRBM_STATUS 0xE50 #define SRBM_SOFT_RESET 0x0E60 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 08eb32a..869ead1 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type, struct hid_report_enum *report_enum = device->report_enum + type; struct hid_report *report; + if (id >= HID_MAX_IDS) + return NULL; if (report_enum->report_id_hash[id]) return report_enum->report_id_hash[id]; @@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) case HID_GLOBAL_ITEM_TAG_REPORT_ID: parser->global.report_id = item_udata(item); - if (parser->global.report_id == 0) { - hid_err(parser->device, "report_id 0 is invalid\n"); + if (parser->global.report_id == 0 || + parser->global.report_id >= HID_MAX_IDS) { + hid_err(parser->device, "report_id %u is invalid\n", + parser->global.report_id); return -1; } return 0; @@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device) for (i = 0; i < HID_REPORT_TYPES; i++) { struct hid_report_enum *report_enum = device->report_enum + i; - for (j = 0; j < 256; j++) { + for (j = 0; j < HID_MAX_IDS; j++) { struct hid_report *report = report_enum->report_id_hash[j]; if (report) hid_free_report(report); @@ -1153,7 +1157,12 @@ EXPORT_SYMBOL_GPL(hid_output_report); int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) { - unsigned size = field->report_size; + unsigned size; + + if (!field) + return -1; + + size = field->report_size; hid_dump_input(field->report->device, field->usage + offset, value); diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 21b196c..f495ada 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy, { struct hid_device *dev = container_of(psy, struct hid_device, battery); int ret = 0; - __u8 buf[2] = {}; + __u8 *buf; switch (prop) { case POWER_SUPPLY_PROP_PRESENT: @@ -349,13 +349,19 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: + + buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + break; + } ret = dev->hid_get_raw_report(dev, dev->battery_report_id, - buf, sizeof(buf), + buf, 2, dev->battery_report_type); if (ret != 2) { - if (ret >= 0) - ret = -EINVAL; + ret = -ENODATA; + kfree(buf); break; } @@ -364,6 +370,7 @@ static int hidinput_get_battery_property(struct 
power_supply *psy, buf[1] <= dev->battery_max) val->intval = (100 * (buf[1] - dev->battery_min)) / (dev->battery_max - dev->battery_min); + kfree(buf); break; case POWER_SUPPLY_PROP_MODEL_NAME: diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 86a969f..60bd823 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev) struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT]. report_id_hash[0x0d]; - if (!report) + if (!report || report->maxfield < 1 || + report->field[0]->report_count < 1) return -EINVAL; usbhid_submit_report(hdev, report, USB_DIR_IN); diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c index a79e95b..1a79a1e 100644 --- a/drivers/hid/hid-picolcd_cir.c +++ b/drivers/hid/hid-picolcd_cir.c @@ -147,6 +147,7 @@ void picolcd_exit_cir(struct picolcd_data *data) struct rc_dev *rdev = data->rc_dev; data->rc_dev = NULL; - rc_unregister_device(rdev); + if (rdev) + rc_unregister_device(rdev); } diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c index 86df26e..775252d 100644 --- a/drivers/hid/hid-picolcd_core.c +++ b/drivers/hid/hid-picolcd_core.c @@ -292,7 +292,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev, buf += 10; cnt -= 10; } - if (!report) + if (!report || report->maxfield != 1) return -EINVAL; while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r')) diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c index eb00357..104df56 100644 --- a/drivers/hid/hid-picolcd_fb.c +++ b/drivers/hid/hid-picolcd_fb.c @@ -595,10 +595,14 @@ err_nomem: void picolcd_exit_framebuffer(struct picolcd_data *data) { struct fb_info *info = data->fb_info; - struct picolcd_fb_data *fbdata = info->par; + struct picolcd_fb_data *fbdata; unsigned long flags; + if (!info) + return; + device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate); + fbdata = info->par; /* disconnect framebuffer from HID dev */ spin_lock_irqsave(&fbdata->lock, flags); diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c index 47ed74c..00cd2f8 100644 --- a/drivers/hid/hid-pl.c +++ b/drivers/hid/hid-pl.c @@ -129,8 +129,14 @@ static int plff_init(struct hid_device *hid) strong = &report->field[0]->value[2]; weak = &report->field[0]->value[3]; debug("detected single-field device"); - } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 && - report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) { + } else if (report->field[0]->maxusage == 1 && + report->field[0]->usage[0].hid == + (HID_UP_LED | 0x43) && + report->maxfield >= 4 && + report->field[0]->report_count >= 1 && + report->field[1]->report_count >= 1 && + report->field[2]->report_count >= 1 && + report->field[3]->report_count >= 1) { report->field[0]->value[0] = 0x00; report->field[1]->value[0] = 0x00; strong = &report->field[2]->value[0]; diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 0bc58bd..5dfcf43 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -223,7 +223,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, mutex_lock(&data->mutex); report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); - if (!report || (field_index >= report->maxfield)) { + if (!report || (field_index >= report->maxfield) || + report->field[field_index]->report_count < 1) { ret = -EINVAL; goto done_proc; } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 
413a731..cfe5288 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, __u8 *buf; int ret = 0; - if (!hidraw_table[minor]) { + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } @@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file) } mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; } @@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on) return fasync_helper(fd, file, on, &list->fasync); } +static void drop_ref(struct hidraw *hidraw, int exists_bit) +{ + if (exists_bit) { + hid_hw_close(hidraw->hid); + hidraw->exist = 0; + if (hidraw->open) + wake_up_interruptible(&hidraw->wait); + } else { + --hidraw->open; + } + + if (!hidraw->open && !hidraw->exist) { + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); + hidraw_table[hidraw->minor] = NULL; + kfree(hidraw); + } +} + static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); - struct hidraw *dev; struct hidraw_list *list = file->private_data; - int ret; - int i; mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { - ret = -ENODEV; - goto unlock; - } list_del(&list->node); - dev = hidraw_table[minor]; - if (!--dev->open) { - if (list->hidraw->exist) { - hid_hw_power(dev->hid, PM_HINT_NORMAL); - hid_hw_close(dev->hid); - } else { - kfree(list->hidraw); - } - } - - for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i) - kfree(list->buffer[i].value); kfree(list); - ret = 0; -unlock: - mutex_unlock(&minors_lock); - return ret; + drop_ref(hidraw_table[minor], 0); + + mutex_unlock(&minors_lock); + return 0; } static long hidraw_ioctl(struct file *file, unsigned int cmd, @@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid) struct hidraw *hidraw = hid->hidraw; mutex_lock(&minors_lock); - hidraw->exist = 0; - - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); - hidraw_table[hidraw->minor] = NULL; + drop_ref(hidraw, 1); - if (hidraw->open) { - hid_hw_close(hid); - wake_up_interruptible(&hidraw->wait); - } else { - kfree(hidraw); - } mutex_unlock(&minors_lock); } EXPORT_SYMBOL_GPL(hidraw_disconnect); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f1e7b86..72f2ee9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -887,56 +887,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain, return order; } +static void dma_pte_free_level(struct dmar_domain *domain, int level, + struct dma_pte *pte, unsigned long pfn, + unsigned long start_pfn, unsigned long last_pfn) +{ + pfn = max(start_pfn, pfn); + pte = &pte[pfn_level_offset(pfn, level)]; + + do { + unsigned long level_pfn; + struct dma_pte *level_pte; + + if (!dma_pte_present(pte) || dma_pte_superpage(pte)) + goto next; + + level_pfn = pfn & level_mask(level - 1); + level_pte = phys_to_virt(dma_pte_addr(pte)); + + if (level > 2) + dma_pte_free_level(domain, level - 1, level_pte, + level_pfn, start_pfn, last_pfn); + + /* If range covers entire pagetable, free it */ + if (!(start_pfn > level_pfn || + last_pfn < level_pfn + level_size(level))) { + dma_clear_pte(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); + free_pgtable_page(level_pte); + } +next: + pfn += level_size(level); + } while (!first_pte_in_page(++pte) && pfn <= last_pfn); +} + /* free page table 
pages. last level pte should already be cleared */ static void dma_pte_free_pagetable(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - struct dma_pte *first_pte, *pte; - int total = agaw_to_level(domain->agaw); - int level; - unsigned long tmp; - int large_page = 2; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); BUG_ON(start_pfn > last_pfn); /* We don't need lock here; nobody else touches the iova range */ - level = 2; - while (level <= total) { - tmp = align_to_level(start_pfn, level); - - /* If we can't even clear one PTE at this level, we're done */ - if (tmp + level_size(level) - 1 > last_pfn) - return; - - do { - large_page = level; - first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); - if (large_page > level) - level = large_page + 1; - if (!pte) { - tmp = align_to_level(tmp + 1, level + 1); - continue; - } - do { - if (dma_pte_present(pte)) { - free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); - dma_clear_pte(pte); - } - pte++; - tmp += level_size(level); - } while (!first_pte_in_page(pte) && - tmp + level_size(level) - 1 <= last_pfn); + dma_pte_free_level(domain, agaw_to_level(domain->agaw), + domain->pgd, 0, start_pfn, last_pfn); - domain_flush_cache(domain, first_pte, - (void *)pte - (void *)first_pte); - - } while (tmp && tmp + level_size(level) - 1 <= last_pfn); - level++; - } /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { free_pgtable_page(domain->pgd); diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c index 74a24cf..cb9ee1e 100644 --- a/drivers/leds/leds-wm831x-status.c +++ b/drivers/leds/leds-wm831x-status.c @@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev) int id = pdev->id % ARRAY_SIZE(chip_pdata->status); int ret; - res = platform_get_resource(pdev, IORESOURCE_IO, 0); + res = platform_get_resource(pdev, IORESOURCE_REG, 0); if (res == NULL) { - dev_err(&pdev->dev, "No I/O resource\n"); + dev_err(&pdev->dev, "No register resource\n"); ret = -EINVAL; goto err; } diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index 4a980e0..02d4970 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c @@ -1884,7 +1884,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids); #ifdef CONFIG_OF static const struct of_device_id coda_dt_ids[] = { - { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, + { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, { /* sentinel */ } }; diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index 2b1b9f3..661c563 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c @@ -1121,10 +1121,14 @@ static int gsc_probe(struct platform_device *pdev) goto err_clk; } - ret = gsc_register_m2m_device(gsc); + ret = v4l2_device_register(dev, &gsc->v4l2_dev); if (ret) goto err_clk; + ret = gsc_register_m2m_device(gsc); + if (ret) + goto err_v4l2; + platform_set_drvdata(pdev, gsc); pm_runtime_enable(dev); ret = pm_runtime_get_sync(&pdev->dev); @@ -1146,6 +1150,8 @@ err_pm: pm_runtime_put(dev); err_m2m: gsc_unregister_m2m_device(gsc); +err_v4l2: + v4l2_device_unregister(&gsc->v4l2_dev); err_clk: gsc_clk_put(gsc); return ret; @@ 
-1156,6 +1162,7 @@ static int gsc_remove(struct platform_device *pdev) struct gsc_dev *gsc = platform_get_drvdata(pdev); gsc_unregister_m2m_device(gsc); + v4l2_device_unregister(&gsc->v4l2_dev); vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx); pm_runtime_disable(&pdev->dev); diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h index 5f157ef..a1b4148 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.h +++ b/drivers/media/platform/exynos-gsc/gsc-core.h @@ -343,6 +343,7 @@ struct gsc_dev { unsigned long state; struct vb2_alloc_ctx *alloc_ctx; struct video_device vdev; + struct v4l2_device v4l2_dev; }; /** diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c index c267c57..a11be54 100644 --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c @@ -732,6 +732,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc) gsc->vdev.release = video_device_release_empty; gsc->vdev.lock = &gsc->lock; gsc->vdev.vfl_dir = VFL_DIR_M2M; + gsc->vdev.v4l2_dev = &gsc->v4l2_dev; snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m", GSC_MODULE_NAME, gsc->id); diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 1bfbc32..604d2d9 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -772,6 +772,7 @@ static int g2d_probe(struct platform_device *pdev) } *vfd = g2d_videodev; vfd->lock = &dev->mutex; + vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index 84dc26f..e35fab4 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c @@ -309,6 +309,11 @@ static int hdpvr_probe(struct usb_interface *interface, dev->workqueue = 0; + /* init video transfer queues first of all */ + /* to prevent oops in hdpvr_delete() on error paths */ + INIT_LIST_HEAD(&dev->free_buff_list); + INIT_LIST_HEAD(&dev->rec_buff_list); + /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); @@ -331,10 +336,6 @@ static int hdpvr_probe(struct usb_interface *interface, if (!dev->workqueue) goto error; - /* init video transfer queues */ - INIT_LIST_HEAD(&dev->free_buff_list); - INIT_LIST_HEAD(&dev->rec_buff_list); - dev->options = hdpvr_default_options; if (default_video_input < HDPVR_VIDEO_INPUTS) @@ -388,7 +389,7 @@ static int hdpvr_probe(struct usb_interface *interface, if (hdpvr_register_videodev(dev, &interface->dev, video_nr[atomic_inc_return(&dev_nr)])) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); - goto error; + goto reg_fail; } #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 621c7a3..b83e3ca 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -759,7 +759,7 @@ static int ilo_probe(struct pci_dev *pdev, /* Ignore subsystem_device = 0x1979 (set by BIOS) */ if (pdev->subsystem_device == 0x1979) - goto out; + return 0; if (max_ccb > MAX_CCB) max_ccb = MAX_CCB; @@ -899,7 +899,7 @@ static void __exit ilo_exit(void) class_destroy(ilo_class); } -MODULE_VERSION("1.4"); +MODULE_VERSION("1.4.1"); MODULE_ALIAS(ILO_NAME); MODULE_DESCRIPTION(ILO_NAME); MODULE_AUTHOR("David 
Altobelli <david.altobelli@xxxxxx>"); diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index fff9286..491e9ec 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) pio: if (!desc) { /* DMA failed, fall back to PIO */ + tmio_mmc_enable_dma(host, false); if (ret >= 0) ret = -EIO; host->chan_rx = NULL; @@ -116,7 +117,6 @@ pio: } dev_warn(&host->pdev->dev, "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); } dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, @@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) pio: if (!desc) { /* DMA failed, fall back to PIO */ + tmio_mmc_enable_dma(host, false); if (ret >= 0) ret = -EIO; host->chan_tx = NULL; @@ -197,7 +198,6 @@ pio: } dev_warn(&host->pdev->dev, "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); } dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index db04f53..d831091 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2810,7 +2810,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) if (!chip->select_chip) chip->select_chip = nand_select_chip; - if (!chip->read_byte) + + /* If called twice, pointers that depend on busw may need to be reset */ + if (!chip->read_byte || chip->read_byte == nand_read_byte) chip->read_byte = busw ? nand_read_byte16 : nand_read_byte; if (!chip->read_word) chip->read_word = nand_read_word; @@ -2818,9 +2820,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw) chip->block_bad = nand_block_bad; if (!chip->block_markbad) chip->block_markbad = nand_default_block_markbad; - if (!chip->write_buf) + if (!chip->write_buf || chip->write_buf == nand_write_buf) chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; - if (!chip->read_buf) + if (!chip->read_buf || chip->read_buf == nand_read_buf) chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; if (!chip->scan_bbt) chip->scan_bbt = nand_default_bbt; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 84b312ea..81d57e9 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -137,7 +137,9 @@ #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVNETA_GMAC_AN_SPEED_EN BIT(7) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) #define MVNETA_MIB_COUNTERS_BASE 0x3080 #define MVNETA_MIB_LATE_COLLISION 0x7c #define MVNETA_DA_FILT_SPEC_MCAST 0x3400 @@ -912,6 +914,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp) /* Assign port SDMA configuration */ mvreg_write(pp, MVNETA_SDMA_CONFIG, val); + /* Disable PHY polling in hardware, since we're using the + * kernel phylib to do this. 
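Several hunks in this patch (the rs400 GART enable above, this mvneta change) switch from overwriting a whole register to a read-modify-write of only the bits they care about. A trivial stand-alone illustration of that pattern, with a fake register and an illustrative bit position rather than the real MVNETA definitions:

#include <stdint.h>
#include <stdio.h>

#define PHY_POLLING_ENABLE (1u << 1)   /* bit position is illustrative only */

static uint32_t unit_control = 0xffffffff;     /* fake register contents */

/* Generic read-modify-write: clear the bits in 'mask', then set 'val',
 * leaving every other bit of the register untouched. */
static void reg_update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	uint32_t tmp = *reg;

	tmp &= ~mask;
	tmp |= val;
	*reg = tmp;
}

int main(void)
{
	reg_update_bits(&unit_control, PHY_POLLING_ENABLE, 0);
	printf("unit_control=%08x\n", (unsigned)unit_control);  /* bit 1 now clear */
	return 0;
}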
+ */ + val = mvreg_read(pp, MVNETA_UNIT_CONTROL); + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2299,7 +2308,9 @@ static void mvneta_adjust_link(struct net_device *ndev) val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); if (phydev->duplex) val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 3afc24b..e962502 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1072,6 +1072,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, * is_on == 0 means MRC CCK is OFF (more noise imm) */ bool is_on = param ? 1 : 0; + + if (ah->caps.rx_chainmask == 1) + break; + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_ENABLE, is_on); REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 42794c5..46e1fa7 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -79,10 +79,6 @@ struct ath_config { sizeof(struct ath_buf_state)); \ } while (0) -#define ATH_RXBUF_RESET(_bf) do { \ - (_bf)->bf_stale = false; \ - } while (0) - /** * enum buffer_type - Buffer type flags * @@ -322,6 +318,7 @@ struct ath_rx { struct ath_buf *rx_bufptr; struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; + struct ath_buf *buf_hold; struct sk_buff *frag; }; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 90752f2..262491d 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -41,8 +41,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) struct ath_desc *ds; struct sk_buff *skb; - ATH_RXBUF_RESET(bf); - ds = bf->bf_desc; ds->ds_link = 0; /* link to null */ ds->ds_data = bf->bf_buf_addr; @@ -69,6 +67,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) sc->rx.rxlink = &ds->ds_link; } +static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf) +{ + if (sc->rx.buf_hold) + ath_rx_buf_link(sc, sc->rx.buf_hold); + + sc->rx.buf_hold = bf; +} + static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) { /* XXX block beacon interrupts */ @@ -116,7 +122,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc, skb = bf->bf_mpdu; - ATH_RXBUF_RESET(bf); memset(skb->data, 0, ah->caps.rx_status_len); dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, ah->caps.rx_status_len, DMA_TO_DEVICE); @@ -444,6 +449,7 @@ int ath_startrecv(struct ath_softc *sc) if (list_empty(&sc->rx.rxbuf)) goto start_recv; + sc->rx.buf_hold = NULL; sc->rx.rxlink = NULL; list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) { ath_rx_buf_link(sc, bf); @@ -689,6 +695,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, } bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); + if (bf == sc->rx.buf_hold) + return NULL; + ds = bf->bf_desc; /* @@ -1238,7 +1247,7 @@ requeue: if (edma) { ath_rx_edma_buf_link(sc, qtype); } else { - ath_rx_buf_link(sc, bf); + ath_rx_buf_relink(sc, bf); ath9k_hw_rxena(ah); } } while (1); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c 
b/drivers/net/wireless/ath/ath9k/xmit.c index 90e48a0..e28f453 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2467,6 +2467,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) for (acno = 0, ac = &an->ac[acno]; acno < IEEE80211_NUM_ACS; acno++, ac++) { ac->sched = false; + ac->clear_ps_filter = true; ac->txq = sc->tx.txq_map[acno]; INIT_LIST_HEAD(&ac->tid_q); } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c index 1860c57..4fb9635 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c @@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di) /* * post receive buffers - * return false is refill failed completely and ring is empty this will stall - * the rx dma and user might want to call rxfill again asap. This unlikely - * happens on memory-rich NIC, but often on memory-constrained dongle + * Return false if refill failed completely or dma mapping failed. The ring + * is empty, which will stall the rx dma and user might want to call rxfill + * again asap. This is unlikely to happen on a memory-rich NIC, but often on + * memory-constrained dongle. */ bool dma_rxfill(struct dma_pub *pub) { @@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub) pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, DMA_FROM_DEVICE); + if (dma_mapping_error(di->dmadev, pa)) + return false; /* save the free packet pointer */ di->rxp[rxout] = p; @@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p) /* get physical address of buffer start */ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); - + /* if mapping failed, free skb */ + if (dma_mapping_error(di->dmadev, pa)) { + brcmu_pkt_buf_free_skb(p); + return; + } /* With a DMA segment list, Descriptor table is filled * using the segment list instead of looping over * buffers in multi-chain DMA. 
Therefore, EOF for SGLIST diff --git a/drivers/of/base.c b/drivers/of/base.c index 2390ddb..8d6faae 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1482,6 +1482,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) ap = dt_alloc(sizeof(*ap) + len + 1, 4); if (!ap) continue; + memset(ap, 0, sizeof(*ap) + len + 1); ap->alias = start; of_alias_add(ap, np, id, start, len); } diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 471c71f..d3234a6 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -326,7 +326,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask) static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin) { - return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1; + return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1); } static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on) @@ -446,7 +446,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask, static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin) { - return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1; + return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1); } static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index a1dcdc3..bc108d2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2421,14 +2421,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { - sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); - goto defaults; - } + sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); + goto defaults; + Page_found: if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index 122d980..bc811dd 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -271,8 +271,9 @@ struct dt282x_private { } \ udelay(5); \ } \ - if (_i) \ + if (_i) { \ b \ + } \ } while (0) static int prep_ai_dma(struct comedi_device *dev, int chan, int size); diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index 62f8276..73534ec 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c @@ -473,7 +473,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) end = start + (bio->bi_size >> SECTOR_SHIFT); bound = zram->disksize >> SECTOR_SHIFT; /* out of range range */ - if (unlikely(start >= bound || end >= bound || start > end)) + if (unlikely(start >= bound || end > bound || start > end)) return 0; /* I/O request is valid */ @@ -622,9 +622,7 @@ static void zram_slot_free_notify(struct block_device *bdev, struct zram *zram; zram = bdev->bd_disk->private_data; - down_write(&zram->lock); zram_free_page(zram, index); - up_write(&zram->lock); zram_stat64_inc(zram, &zram->stats.notify_free); } diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h index 4265ab4..df2eec4 100644 --- a/drivers/staging/zram/zram_drv.h +++ b/drivers/staging/zram/zram_drv.h @@ -92,9 +92,8 @@ struct zram { void *compress_buffer; struct table *table; spinlock_t stat64_lock; /* protect 64-bit stats */ - struct rw_semaphore lock; /* protect compression buffers, table, - * 32bit stat counters against concurrent - 
* notifications, reads and writes */ + struct rw_semaphore lock; /* protect compression buffers and table + * against concurrent read and writes */ struct request_queue *queue; struct gendisk *disk; int init_done; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 122d056..46c9379 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -208,6 +208,7 @@ skip_error: static void wdm_int_callback(struct urb *urb) { int rv = 0; + int responding; int status = urb->status; struct wdm_device *desc; struct usb_cdc_notification *dr; @@ -261,8 +262,8 @@ static void wdm_int_callback(struct urb *urb) spin_lock(&desc->iuspin); clear_bit(WDM_READ, &desc->flags); - set_bit(WDM_RESPONDING, &desc->flags); - if (!test_bit(WDM_DISCONNECTING, &desc->flags) + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); + if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags) && !test_bit(WDM_SUSPENDING, &desc->flags)) { rv = usb_submit_urb(desc->response, GFP_ATOMIC); dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", @@ -666,16 +667,20 @@ static void wdm_rxwork(struct work_struct *work) { struct wdm_device *desc = container_of(work, struct wdm_device, rxwork); unsigned long flags; - int rv; + int rv = 0; + int responding; spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { spin_unlock_irqrestore(&desc->iuspin, flags); } else { + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); - rv = usb_submit_urb(desc->response, GFP_KERNEL); + if (!responding) + rv = usb_submit_urb(desc->response, GFP_KERNEL); if (rv < 0 && rv != -EPERM) { spin_lock_irqsave(&desc->iuspin, flags); + clear_bit(WDM_RESPONDING, &desc->flags); if (!test_bit(WDM_DISCONNECTING, &desc->flags)) schedule_work(&desc->rxwork); spin_unlock_irqrestore(&desc->iuspin, flags); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 7199adc..a6b2cab 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); if (config->desc.bDescriptorType != USB_DT_CONFIG || - config->desc.bLength < USB_DT_CONFIG_SIZE) { + config->desc.bLength < USB_DT_CONFIG_SIZE || + config->desc.bLength > size) { dev_err(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d96ca07..0763d84 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2951,7 +2951,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) status); /* bail if autosuspend is requested */ if (PMSG_IS_AUTO(msg)) - return status; + goto err_wakeup; } } @@ -2960,14 +2960,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) usb_set_usb2_hardware_lpm(udev, 0); if (usb_disable_ltm(udev)) { - dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.", - __func__); - return -ENOMEM; + dev_err(&udev->dev, "Failed to disable LTM before suspend\n."); + status = -ENOMEM; + if (PMSG_IS_AUTO(msg)) + goto err_ltm; } if (usb_unlocked_disable_lpm(udev)) { - dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.", - __func__); - return -ENOMEM; + dev_err(&udev->dev, "Failed to disable LPM before suspend\n."); + status = -ENOMEM; + if (PMSG_IS_AUTO(msg)) + goto err_lpm3; } /* see 7.1.7.6 */ @@ -2981,28 +2983,31 
@@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) if (status) { dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", port1, status); - /* paranoia: "should not happen" */ - if (udev->do_remote_wakeup) { - if (!hub_is_superspeed(hub->hdev)) { - (void) usb_control_msg(udev, - usb_sndctrlpipe(udev, 0), - USB_REQ_CLEAR_FEATURE, - USB_RECIP_DEVICE, - USB_DEVICE_REMOTE_WAKEUP, 0, - NULL, 0, - USB_CTRL_SET_TIMEOUT); - } else - (void) usb_disable_function_remotewakeup(udev); - - } + /* Try to enable USB3 LPM and LTM again */ + usb_unlocked_enable_lpm(udev); + err_lpm3: + usb_enable_ltm(udev); + err_ltm: /* Try to enable USB2 hardware LPM again */ if (udev->usb2_hw_lpm_capable == 1) usb_set_usb2_hardware_lpm(udev, 1); - /* Try to enable USB3 LTM and LPM again */ - usb_enable_ltm(udev); - usb_unlocked_enable_lpm(udev); + if (udev->do_remote_wakeup) { + if (udev->speed < USB_SPEED_SUPER) + usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, + USB_RECIP_DEVICE, + USB_DEVICE_REMOTE_WAKEUP, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); + else + usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + USB_REQ_CLEAR_FEATURE, + USB_RECIP_INTERFACE, + USB_INTRF_FUNC_SUSPEND, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); + } + err_wakeup: /* System sleep transitions should never fail */ if (!PMSG_IS_AUTO(msg)) diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index dedb80b..f4f032e 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -211,7 +211,7 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev) if (pdata && pdata->exit) pdata->exit(pdev); - if (pdata->otg) + if (pdata && pdata->otg) usb_phy_shutdown(pdata->otg); clk_disable_unprepare(priv->usbclk); diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 951514e..ef6782b 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -371,7 +371,7 @@ static struct pci_driver ohci_pci_driver = { .remove = usb_hcd_pci_remove, .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM .driver = { .pm = &usb_hcd_pci_pm_ops }, diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 93ad67e..6e70ce9 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci) * here that the generic code does not try to make a pci_dev from our * dev struct in order to setup MSI */ - xhci->quirks |= XHCI_BROKEN_MSI; + xhci->quirks |= XHCI_PLAT; } /* called during probe() after chip reset completes */ diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 661c553..8889d67 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -342,9 +342,14 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) static int xhci_try_enable_msi(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + struct pci_dev *pdev; int ret; + /* The xhci platform device has set up IRQs through usb_add_hcd. */ + if (xhci->quirks & XHCI_PLAT) + return 0; + + pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); /* * Some Fresco Logic host controllers advertise MSI, but fail to * generate interrupts. Don't even try to enable MSI. 
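The usb_port_suspend() rework in the hub.c hunk above replaces scattered early returns with one error path that undoes, in reverse order, everything that was disabled before the suspend attempt (USB3 LPM, LTM, USB2 hardware LPM, remote wakeup). A minimal standalone sketch of that goto-unwind idiom follows; step_a/step_b/step_c and their undo helpers are hypothetical stand-ins, not the real USB calls:

#include <stdio.h>

/* Hypothetical stand-ins for the disable-LPM / disable-LTM / suspend steps. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the final step fails */
static void undo_step_b(void) { puts("re-enable LTM"); }
static void undo_step_a(void) { puts("re-enable LPM"); }

static int suspend_like_sequence(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto undo_a;
	ret = step_c();
	if (ret)
		goto undo_b;
	return 0;

undo_b:			/* unwind strictly in reverse order of setup */
	undo_step_b();
undo_a:
	undo_step_a();
out:
	return ret;
}

int main(void)
{
	return suspend_like_sequence() ? 1 : 0;
}

As with the err_lpm3/err_ltm/err_wakeup labels introduced above, a failure at any step falls through every undo label for the steps that did complete, so nothing that was disabled stays disabled on the error path.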
@@ -3502,10 +3507,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; + struct device *dev = hcd->self.controller; unsigned long flags; u32 state; int i, ret; +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * We called pm_runtime_get_noresume when the device was attached. + * Decrement the counter here to allow controller to runtime suspend + * if no devices remain. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_put_noidle(dev); +#endif + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); /* If the host is halted due to driver unload, we still need to free the * device. @@ -3577,6 +3593,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.controller; unsigned long flags; int timeleft; int ret; @@ -3629,6 +3646,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) goto disable_slot; } udev->slot_id = xhci->slot_id; + +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * If resetting upon resume, we can't put the controller into runtime + * suspend if there is a device attached. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_get_noresume(dev); +#endif + /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? */ return 1; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index abed254..d0dd206 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1516,6 +1516,7 @@ struct xhci_hcd { #define XHCI_SPURIOUS_REBOOT (1 << 13) #define XHCI_COMP_MODE_QUIRK (1 << 14) #define XHCI_AVOID_BEI (1 << 15) +#define XHCI_PLAT (1 << 16) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 397e028..c5afc94 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -375,7 +375,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, kfree(urbtrack); return -ENOMEM; } - urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC); if (!urbtrack->setup) { usb_free_urb(urbtrack->urb); kfree(urbtrack); @@ -383,8 +383,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, } urbtrack->setup->bRequestType = (__u8)0x40; urbtrack->setup->bRequest = (__u8)0x0e; - urbtrack->setup->wValue = get_reg_value(reg, dummy); - urbtrack->setup->wIndex = get_reg_index(reg); + urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy)); + urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg)); urbtrack->setup->wLength = 0; usb_fill_control_urb(urbtrack->urb, usbdev, usb_sndctrlpipe(usbdev, 0), diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 157c0cc..51be226 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -729,9 +729,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; + struct gnttab_free_callback *cb; + spin_lock_irqsave(&gnttab_list_lock, flags); - if (callback->next) - goto out; + + /* Check if the callback is already on the list */ + cb = gnttab_free_callback_list; + while (cb) { + if (cb == callback) + goto out; + cb = cb->next; + } + callback->fn = fn; 
callback->arg = arg; callback->count = count; diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 36549a4..c171111 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -197,8 +197,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len, &dl.object_no, &dl.object_offset, &olen); - if (r < 0) + if (r < 0) { + up_read(&osdc->map_sem); return -EIO; + } dl.file_offset -= dl.object_offset; dl.object_size = ceph_file_layout_object_size(ci->i_layout); dl.block_size = ceph_file_layout_su(ci->i_layout); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6771d01..38bced5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -380,6 +380,7 @@ cifs_reconnect(struct TCP_Server_Info *server) try_to_freeze(); /* we should try only the port we connected to before */ + mutex_lock(&server->srv_mutex); rc = generic_ip_connect(server); if (rc) { cFYI(1, "reconnect error %d", rc); @@ -391,6 +392,7 @@ cifs_reconnect(struct TCP_Server_Info *server) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); } + mutex_unlock(&server->srv_mutex); } while (server->tcpStatus == CifsNeedReconnect); return rc; diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 7b1c5e3..a27f337 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -469,6 +469,7 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server) queue_work(cifsiod_wq, &cfile->oplock_break); + kfree(lw); spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_tcp_ses_lock); return true; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index bbcd6a0..a4903ff 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2341,16 +2341,32 @@ do { \ #define EXT4_FREECLUSTERS_WATERMARK 0 #endif +/* Update i_disksize. Requires i_mutex to avoid races with truncate */ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize) { - /* - * XXX: replace with spinlock if seen contended -bzzz - */ + WARN_ON_ONCE(S_ISREG(inode->i_mode) && + !mutex_is_locked(&inode->i_mutex)); + down_write(&EXT4_I(inode)->i_data_sem); + if (newsize > EXT4_I(inode)->i_disksize) + EXT4_I(inode)->i_disksize = newsize; + up_write(&EXT4_I(inode)->i_data_sem); +} + +/* + * Update i_disksize after writeback has been started. Races with truncate + * are avoided by checking i_size under i_data_sem. + */ +static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize) +{ + loff_t i_size; + down_write(&EXT4_I(inode)->i_data_sem); + i_size = i_size_read(inode); + if (newsize > i_size) + newsize = i_size; if (newsize > EXT4_I(inode)->i_disksize) EXT4_I(inode)->i_disksize = newsize; up_write(&EXT4_I(inode)->i_data_sem); - return ; } struct ext4_group_info { diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 1ec8761..fe1fada 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1666,10 +1666,8 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd) * Update on-disk size along with block allocation. 
*/ disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits; - if (disksize > i_size_read(mpd->inode)) - disksize = i_size_read(mpd->inode); if (disksize > EXT4_I(mpd->inode)->i_disksize) { - ext4_update_i_disksize(mpd->inode, disksize); + ext4_wb_update_i_disksize(mpd->inode, disksize); err = ext4_mark_inode_dirty(handle, mpd->inode); if (err) ext4_error(mpd->inode->i_sb, @@ -4422,7 +4420,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) ext4_journal_stop(handle); } - if (attr->ia_valid & ATTR_SIZE) { + if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { + handle_t *handle; + loff_t oldsize = inode->i_size; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); @@ -4430,72 +4430,69 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) if (attr->ia_size > sbi->s_bitmap_maxbytes) return -EFBIG; } - } - - if (S_ISREG(inode->i_mode) && - attr->ia_valid & ATTR_SIZE && - (attr->ia_size < inode->i_size)) { - handle_t *handle; - - handle = ext4_journal_start(inode, 3); - if (IS_ERR(handle)) { - error = PTR_ERR(handle); - goto err_out; - } - if (ext4_handle_valid(handle)) { - error = ext4_orphan_add(handle, inode); - orphan = 1; - } - EXT4_I(inode)->i_disksize = attr->ia_size; - rc = ext4_mark_inode_dirty(handle, inode); - if (!error) - error = rc; - ext4_journal_stop(handle); - - if (ext4_should_order_data(inode)) { - error = ext4_begin_ordered_truncate(inode, + if (S_ISREG(inode->i_mode) && + (attr->ia_size < inode->i_size)) { + if (ext4_should_order_data(inode)) { + error = ext4_begin_ordered_truncate(inode, attr->ia_size); - if (error) { - /* Do as much error cleanup as possible */ - handle = ext4_journal_start(inode, 3); - if (IS_ERR(handle)) { - ext4_orphan_del(NULL, inode); + if (error) goto err_out; - } - ext4_orphan_del(handle, inode); - orphan = 0; - ext4_journal_stop(handle); + } + handle = ext4_journal_start(inode, 3); + if (IS_ERR(handle)) { + error = PTR_ERR(handle); goto err_out; } - } - } - - if (attr->ia_valid & ATTR_SIZE) { - if (attr->ia_size != inode->i_size) { - loff_t oldsize = inode->i_size; - - i_size_write(inode, attr->ia_size); - /* - * Blocks are going to be removed from the inode. Wait - * for dio in flight. Temporarily disable - * dioread_nolock to prevent livelock. - */ - if (orphan) { - if (!ext4_should_journal_data(inode)) { - ext4_inode_block_unlocked_dio(inode); - inode_dio_wait(inode); - ext4_inode_resume_unlocked_dio(inode); - } else - ext4_wait_for_tail_page_commit(inode); + if (ext4_handle_valid(handle)) { + error = ext4_orphan_add(handle, inode); + orphan = 1; } + down_write(&EXT4_I(inode)->i_data_sem); + EXT4_I(inode)->i_disksize = attr->ia_size; + rc = ext4_mark_inode_dirty(handle, inode); + if (!error) + error = rc; /* - * Truncate pagecache after we've waited for commit - * in data=journal mode to make pages freeable. + * We have to update i_size under i_data_sem together + * with i_disksize to avoid races with writeback code + * running ext4_wb_update_i_disksize(). */ - truncate_pagecache(inode, oldsize, inode->i_size); + if (!error) + i_size_write(inode, attr->ia_size); + up_write(&EXT4_I(inode)->i_data_sem); + ext4_journal_stop(handle); + if (error) { + ext4_orphan_del(NULL, inode); + goto err_out; + } + } else + i_size_write(inode, attr->ia_size); + + /* + * Blocks are going to be removed from the inode. Wait + * for dio in flight. Temporarily disable + * dioread_nolock to prevent livelock. 
+ */ + if (orphan) { + if (!ext4_should_journal_data(inode)) { + ext4_inode_block_unlocked_dio(inode); + inode_dio_wait(inode); + ext4_inode_resume_unlocked_dio(inode); + } else + ext4_wait_for_tail_page_commit(inode); } - ext4_truncate(inode); + /* + * Truncate pagecache after we've waited for commit + * in data=journal mode to make pages freeable. + */ + truncate_pagecache(inode, oldsize, inode->i_size); } + /* + * We want to call ext4_truncate() even if attr->ia_size == + * inode->i_size for cases like truncation of fallocated space + */ + if (attr->ia_valid & ATTR_SIZE) + ext4_truncate(inode); if (!rc) { setattr_copy(inode, attr); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 315e1f8..146276a 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1148,6 +1148,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file, return -EIO; if (reclen > nbytes) break; + if (memchr(dirent->name, '/', dirent->namelen) != NULL) + return -EIO; over = filldir(dstbuf, dirent->name, dirent->namelen, file->f_pos, dirent->ino, dirent->type); @@ -1548,6 +1550,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name, fc->no_setxattr = 1; err = -EOPNOTSUPP; } + if (!err) + fuse_invalidate_attr(inode); return err; } @@ -1677,6 +1681,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name) fc->no_removexattr = 1; err = -EOPNOTSUPP; } + if (!err) + fuse_invalidate_attr(inode); return err; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f3ab824..e9c40f3 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1299,7 +1299,6 @@ static int fuse_writepage_locked(struct page *page) inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); - end_page_writeback(page); spin_lock(&fc->lock); list_add(&req->writepages_entry, &fi->writepages); @@ -1307,6 +1306,8 @@ static int fuse_writepage_locked(struct page *page) fuse_flush_writepages(inode); spin_unlock(&fc->lock); + end_page_writeback(page); + return 0; err_free: diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index c5bfb4c..a1822f8 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, cpos = map_start >> osb->s_clustersize_bits; mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, map_start + map_len); - mapping_end -= cpos; is_last = 0; while (cpos < mapping_end && !is_last) { u32 fe_flags; diff --git a/fs/proc/root.c b/fs/proc/root.c index 9c7fab1..411068e 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -110,7 +110,8 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, ns = task_active_pid_ns(current); options = data; - if (!current_user_ns()->may_mount_proc) + if (!current_user_ns()->may_mount_proc || + !ns_capable(ns->user_ns, CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); } diff --git a/include/linux/compat.h b/include/linux/compat.h index dec7e2d..98ac3c5 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -648,6 +648,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); +#define compat_save_altstack_ex(uss, sp) do { \ + compat_stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \ + put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ +} 
while (0); #endif asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, diff --git a/include/linux/hid.h b/include/linux/hid.h index 7330a0f..13d812b 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -392,10 +392,12 @@ struct hid_report { struct hid_device *device; /* associated device */ }; +#define HID_MAX_IDS 256 + struct hid_report_enum { unsigned numbered; struct list_head report_list; - struct hid_report *report_id_hash[256]; + struct hid_report *report_id_hash[HID_MAX_IDS]; }; #define HID_REPORT_TYPES 3 diff --git a/include/linux/rculist.h b/include/linux/rculist.h index c92dd28..c9e7330 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list, */ #define list_first_or_null_rcu(ptr, type, member) \ ({struct list_head *__ptr = (ptr); \ - struct list_head __rcu *__next = list_next_rcu(__ptr); \ - likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \ + struct list_head *__next = ACCESS_ONCE(__ptr->next); \ + likely(__ptr != __next) ? \ + list_entry_rcu(__next, type, member) : NULL; \ }) /** diff --git a/include/linux/signal.h b/include/linux/signal.h index 0a89ffc..b50aa60 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -388,4 +388,12 @@ void signals_init(void); int restore_altstack(const stack_t __user *); int __save_altstack(stack_t __user *, unsigned long); +#define save_altstack_ex(uss, sp) do { \ + stack_t __user *__uss = uss; \ + struct task_struct *t = current; \ + put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \ + put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \ + put_user_ex(t->sas_ss_size, &__uss->ss_size); \ +} while (0); + #endif /* _LINUX_SIGNAL_H */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 0a78df5..40e8563 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -408,7 +408,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev, extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM extern const struct dev_pm_ops usb_hcd_pci_pm_ops; #endif #endif /* CONFIG_PCI */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 9650911..0f9b3c3 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -22,6 +22,7 @@ #define _V4L2_CTRLS_H #include <linux/list.h> +#include <linux/mutex.h> #include <linux/videodev2.h> /* forward references */ diff --git a/kernel/fork.c b/kernel/fork.c index 1c53e38..5c992f8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1170,10 +1170,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, return ERR_PTR(-EINVAL); /* - * If the new process will be in a different pid namespace - * don't allow the creation of threads. + * If the new process will be in a different pid namespace don't + * allow it to share a thread group or signal handlers with the + * forking task. */ - if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && + if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) && (task_active_pid_ns(current) != current->nsproxy->pid_ns)) return ERR_PTR(-EINVAL); diff --git a/kernel/pid.c b/kernel/pid.c index f2c6a68..a4434f2 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -262,6 +262,7 @@ void free_pid(struct pid *pid) struct pid_namespace *ns = upid->ns; hlist_del_rcu(&upid->pid_chain); switch(--ns->nr_hashed) { + case 2: case 1: /* When all that is left in the pid namespace * is the reaper wake up the reaper. 
The reaper diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9459edd..e9308d3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2327,6 +2327,8 @@ static void collapse_huge_page(struct mm_struct *mm, goto out; vma = find_vma(mm, address); + if (!vma) + goto out; hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (address < hstart || address + HPAGE_PMD_SIZE > hend) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fd7c0d3..6b7ff19 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5428,7 +5428,13 @@ static int compare_thresholds(const void *a, const void *b) const struct mem_cgroup_threshold *_a = a; const struct mem_cgroup_threshold *_b = b; - return _a->threshold - _b->threshold; + if (_a->threshold > _b->threshold) + return 1; + + if (_a->threshold < _b->threshold) + return -1; + + return 0; } static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) diff --git a/mm/swap.c b/mm/swap.c index 6310dc2..7009b21 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -30,6 +30,7 @@ #include <linux/backing-dev.h> #include <linux/memcontrol.h> #include <linux/gfp.h> +#include <linux/hugetlb.h> #include "internal.h" @@ -77,6 +78,19 @@ static void __put_compound_page(struct page *page) static void put_compound_page(struct page *page) { + /* + * hugetlbfs pages cannot be split from under us. If this is a + * hugetlbfs page, check refcount on head page and release the page if + * the refcount becomes zero. + */ + if (PageHuge(page)) { + page = compound_head(page); + if (put_page_testzero(page)) + __put_compound_page(page); + + return; + } + if (unlikely(PageTail(page))) { /* __split_huge_page_refcount can run under us */ struct page *page_head = compound_trans_head(page); @@ -180,38 +194,51 @@ bool __get_page_tail(struct page *page) * proper PT lock that already serializes against * split_huge_page(). */ - unsigned long flags; bool got = false; - struct page *page_head = compound_trans_head(page); + struct page *page_head; - if (likely(page != page_head && get_page_unless_zero(page_head))) { + /* + * If this is a hugetlbfs page it cannot be split under us. Simply + * increment refcount for the head page. + */ + if (PageHuge(page)) { + page_head = compound_head(page); + atomic_inc(&page_head->_count); + got = true; + } else { + unsigned long flags; + + page_head = compound_trans_head(page); + if (likely(page != page_head && + get_page_unless_zero(page_head))) { + + /* Ref to put_compound_page() comment. */ + if (PageSlab(page_head)) { + if (likely(PageTail(page))) { + __get_page_tail_foll(page, false); + return true; + } else { + put_page(page_head); + return false; + } + } - /* Ref to put_compound_page() comment. */ - if (PageSlab(page_head)) { + /* + * page_head wasn't a dangling pointer but it + * may not be a head page anymore by the time + * we obtain the lock. That is ok as long as it + * can't be freed from under us. + */ + flags = compound_lock_irqsave(page_head); + /* here __split_huge_page_refcount won't run anymore */ if (likely(PageTail(page))) { __get_page_tail_foll(page, false); - return true; - } else { - put_page(page_head); - return false; + got = true; } + compound_unlock_irqrestore(page_head, flags); + if (unlikely(!got)) + put_page(page_head); } - - /* - * page_head wasn't a dangling pointer but it - * may not be a head page anymore by the time - * we obtain the lock. That is ok as long as it - * can't be freed from under us. 
- */ - flags = compound_lock_irqsave(page_head); - /* here __split_huge_page_refcount won't run anymore */ - if (likely(PageTail(page))) { - __get_page_tail_foll(page, false); - got = true; - } - compound_unlock_irqrestore(page_head, flags); - if (unlikely(!got)) - put_page(page_head); } return got; } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index a5dfcb8..d62120b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1723,6 +1723,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, dout("osdc_start_request failed map, " " will retry %lld\n", req->r_tid); rc = 0; + } else { + __unregister_request(osdc, req); } goto out_unlock; } diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index de73214..652c11e 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -1128,7 +1128,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, /* pg_temp? */ t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), - pool->pgp_num_mask); + pool->pg_num_mask); pgid.ps = cpu_to_le16(t); pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); if (pg) { diff --git a/net/core/sock.c b/net/core/sock.c index 57b3243..b8af814 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -208,7 +208,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , - "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX" + "sk_lock-AF_NFC" , "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -224,7 +224,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , - "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX" + "slock-AF_NFC" , "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , @@ -240,7 +240,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , - "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX" + "clock-AF_NFC" , "clock-AF_MAX" }; /* diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 63607da..714b801 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -3099,6 +3099,7 @@ static struct snd_pci_quirk msi_black_list[] = { SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ + SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */ {} diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 5c19496..40f5e2d 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -530,6 +530,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels) } } + if (!ca) { + /* if there was no match, select the regular ALSA channel + * allocation with the matching 
number of channels */ + for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) { + if (channels == channel_allocations[i].channels) { + ca = channel_allocations[i].ca_index; + break; + } + } + } + snd_print_channel_allocation(eld->spk_alloc, buf, sizeof(buf)); snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n", ca, channels, buf); diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c index 5402dfb..8a8d936 100644 --- a/sound/soc/codecs/mc13783.c +++ b/sound/soc/codecs/mc13783.c @@ -126,6 +126,10 @@ static int mc13783_write(struct snd_soc_codec *codec, ret = mc13xxx_reg_write(priv->mc13xxx, reg, value); + /* include errata fix for spi audio problems */ + if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC) + ret = mc13xxx_reg_write(priv->mc13xxx, reg, value); + mc13xxx_unlock(priv->mc13xxx); return ret; diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 9bb9273..9d16a89 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -847,9 +847,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, if (pll_div.k) { reg |= 0x20; - snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f); - snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff); - snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff); + snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff); + snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff); + snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff); } snd_soc_write(codec, WM8960_PLL1, reg);
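The wm8960.c hunk above changes how the 24-bit fractional PLL value K is split across the three K registers: the old code wrote 6-, 9- and 9-bit fields at shifts 18/9/0, while the fix writes three byte-wide fields (bits 23:16, 15:8 and 7:0). A small standalone sketch of that arithmetic; the value of k here is arbitrary and chosen only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int k = 0x3126e9;	/* illustrative 24-bit fractional value */

	/* Byte-wide split used by the corrected writes. */
	unsigned int pll2 = (k >> 16) & 0xff;
	unsigned int pll3 = (k >> 8) & 0xff;
	unsigned int pll4 = k & 0xff;

	printf("PLL2=0x%02x PLL3=0x%02x PLL4=0x%02x\n", pll2, pll3, pll4);

	/* Reassembling the three bytes must give back exactly the original K. */
	unsigned int back = (pll2 << 16) | (pll3 << 8) | pll4;
	printf("reassembled K = 0x%06x\n", back);
	return back == k ? 0 : 1;
}

The round-trip check is the point of the fix: with byte-wide fields each register receives exactly its byte of the fraction, whereas the old 6/9/9-bit split placed the fraction bits at the wrong offsets for byte-wide K fields.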