diff --git a/Makefile b/Makefile index 3915c57..831a7d8 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 11 SUBLEVEL = 10 -EXTRAVERSION = .7 +EXTRAVERSION = .8 NAME = Linux for Workgroups # *DOCUMENTATION* diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 821170e..b969eea 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -16,6 +16,7 @@ config M68K select FPU if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE + select HAVE_FUTEX_CMPXCHG if MMU && FUTEX select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_REL select MODULES_USE_ELF_RELA diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8a4cae7..f443713 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -116,6 +116,7 @@ config S390 select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZ4 diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index a00cbd3..d268475 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -25,7 +25,7 @@ config SPARC select RTC_DRV_M48T59 select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG - select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_JUMP_LABEL if SPARC64 select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3c..ad7e178 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -#define __copy_to_user_inatomic ___copy_to_user -#define __copy_from_user_inatomic ___copy_from_user +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user struct pt_regs; extern unsigned long compute_effective_address(struct pt_regs *, diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index bc4d3f5..cb02145 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -398,8 +398,8 @@ static void apb_fake_ranges(struct pci_dev *dev, apb_calc_first_last(map, &first, &last); res = bus->resource[1]; res->flags = IORESOURCE_MEM; - region.start = (first << 21); - region.end = (last << 21) + ((1 << 21) - 1); + region.start = (first << 29); + region.end = (last << 29) + ((1 << 29) - 1); pcibios_bus_to_resource(dev, res, &region); } diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index baebab2..b9cc976 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -57,9 +57,12 @@ void arch_cpu_idle(void) { if (tlb_type != hypervisor) { touch_nmi_watchdog(); + local_irq_enable(); } else { unsigned long pstate; + local_irq_enable(); + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over * the cpu sleep hypervisor call. */ @@ -81,7 +84,6 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); } - local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 73ec8a7..c79c687 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -189,7 +189,8 @@ linux_sparc_syscall32: mov %i0, %l5 ! IEU1 5: call %l7 ! CTI Group brk forced srl %i5, 0, %o5 ! 
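
For readers cross-checking the apb_fake_ranges() hunk above: with the shift changed from 21 to 29, each bit in the APB map now selects a 512 MB (2^29 byte) slice of the bridge's memory window instead of a 2 MB one. A minimal userspace sketch of the same arithmetic, with made-up first/last values:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the window computation in apb_fake_ranges() after the patch:
 * each map bit describes one 2^29-byte (512 MB) slice. */
static void apb_window(unsigned int first, unsigned int last,
                       uint64_t *start, uint64_t *end)
{
	*start = (uint64_t)first << 29;
	*end   = ((uint64_t)last << 29) + ((1ULL << 29) - 1);
}

int main(void)
{
	uint64_t start, end;

	apb_window(1, 2, &start, &end);		/* made-up first/last values */
	printf("window: 0x%llx - 0x%llx (%llu MB)\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)((end - start + 1) >> 20));
	return 0;
}
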
IEU1 - ba,a,pt %xcc, 3f + ba,pt %xcc, 3f + sra %o0, 0, %o0 /* Linux native system calls enter here... */ .align 32 @@ -217,7 +218,6 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S index 586f41a..185fad4 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S @@ -24,10 +24,6 @@ .align 16 .Lbswap_mask: .octa 0x000102030405060708090a0b0c0d0e0f -.Lpoly: - .octa 0xc2000000000000000000000000000001 -.Ltwo_one: - .octa 0x00000001000000000000000000000001 #define DATA %xmm0 #define SHASH %xmm1 @@ -134,28 +130,3 @@ ENTRY(clmul_ghash_update) .Lupdate_just_ret: ret ENDPROC(clmul_ghash_update) - -/* - * void clmul_ghash_setkey(be128 *shash, const u8 *key); - * - * Calculate hash_key << 1 mod poly - */ -ENTRY(clmul_ghash_setkey) - movaps .Lbswap_mask, BSWAP - movups (%rsi), %xmm0 - PSHUFB_XMM BSWAP %xmm0 - movaps %xmm0, %xmm1 - psllq $1, %xmm0 - psrlq $63, %xmm1 - movaps %xmm1, %xmm2 - pslldq $8, %xmm1 - psrldq $8, %xmm2 - por %xmm1, %xmm0 - # reduction - pshufd $0b00100100, %xmm2, %xmm1 - pcmpeqd .Ltwo_one, %xmm1 - pand .Lpoly, %xmm1 - pxor %xmm1, %xmm0 - movups %xmm0, (%rdi) - ret -ENDPROC(clmul_ghash_setkey) diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 6759dd1..d785cf2 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -30,8 +30,6 @@ void clmul_ghash_mul(char *dst, const be128 *shash); void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, const be128 *shash); -void clmul_ghash_setkey(be128 *shash, const u8 *key); - struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; @@ -58,13 +56,23 @@ static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); + be128 *x = (be128 *)key; + u64 a, b; if (keylen != GHASH_BLOCK_SIZE) { crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); return -EINVAL; } - clmul_ghash_setkey(&ctx->shash, key); + /* perform multiplication by 'x' in GF(2^128) */ + a = be64_to_cpu(x->a); + b = be64_to_cpu(x->b); + + ctx->shash.a = (__be64)((b << 1) | (a >> 63)); + ctx->shash.b = (__be64)((a << 1) | (b >> 63)); + + if (a >> 63) + ctx->shash.b ^= cpu_to_be64(0xc2); return 0; } diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index e59afaa..38f2da4 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -133,6 +133,9 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, { int i; + if (!policy->governor_enabled) + return; + if (!all_cpus) { __gov_queue_work(smp_processor_id(), dbs_data, delay); } else { diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index ea8e103..396f357 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -26,41 +26,108 @@ static unsigned int busfreq; /* FSB, in 10 kHz */ static unsigned int max_multiplier; +static unsigned int param_busfreq = 0; +static unsigned int param_max_multiplier = 0; + +module_param_named(max_multiplier, param_max_multiplier, uint, S_IRUGO); +MODULE_PARM_DESC(max_multiplier, "Maximum multiplier (allowed values: 20 30 35 40 45 50 55 60)"); + +module_param_named(bus_frequency, 
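
The ghash-clmulni change above drops the assembly clmul_ghash_setkey() and derives the hash key in C instead: the 128-bit key is shifted left by one bit and, per the patch's own comment, this performs the multiplication by 'x' in GF(2^128). A standalone sketch of that arithmetic on two host-order 64-bit halves; the example key bytes are arbitrary, and folding 0xc2 into the top byte of the second half is how the patch's cpu_to_be64(0xc2) XOR lands in memory on a little-endian x86 build:

#include <stdio.h>
#include <stdint.h>

/* Read 8 bytes as a big-endian 64-bit value (stand-in for be64_to_cpu()). */
static uint64_t load_be64(const unsigned char *p)
{
	uint64_t v = 0;
	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Same shift-and-reduce step the new ghash_setkey() performs: shift the
 * 128-bit key left by one bit (the halves also swap places, as in the
 * patch) and, when the top bit of the first half was set, fold the
 * reduction constant 0xc2 into the top byte of the second output half. */
static void ghash_shift_key(uint64_t a, uint64_t b,
			    uint64_t *out_a, uint64_t *out_b)
{
	*out_a = (b << 1) | (a >> 63);
	*out_b = (a << 1) | (b >> 63);
	if (a >> 63)
		*out_b ^= 0xc2ULL << 56;
}

int main(void)
{
	unsigned char key[16];	/* arbitrary example key */
	uint64_t a, b, ha, hb;

	for (int i = 0; i < 16; i++)
		key[i] = (unsigned char)(i * 17 + 1);

	a = load_be64(key);
	b = load_be64(key + 8);
	ghash_shift_key(a, b, &ha, &hb);
	printf("shash.a = %016llx\nshash.b = %016llx\n",
	       (unsigned long long)ha, (unsigned long long)hb);
	return 0;
}
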
param_busfreq, uint, S_IRUGO); +MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz"); /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ static struct cpufreq_frequency_table clock_ratio[] = { - {45, /* 000 -> 4.5x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, {50, /* 001 -> 5.0x */ 0}, + {45, /* 000 -> 4.5x */ 0}, {40, /* 010 -> 4.0x */ 0}, - {55, /* 011 -> 5.5x */ 0}, - {20, /* 100 -> 2.0x */ 0}, - {30, /* 101 -> 3.0x */ 0}, - {60, /* 110 -> 6.0x */ 0}, {35, /* 111 -> 3.5x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {20, /* 100 -> 2.0x */ 0}, {0, CPUFREQ_TABLE_END} }; +static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 }; +static const u8 register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 }; + +static const struct { + unsigned freq; + unsigned mult; +} usual_frequency_table[] = { + { 400000, 40 }, // 100 * 4 + { 450000, 45 }, // 100 * 4.5 + { 475000, 50 }, // 95 * 5 + { 500000, 50 }, // 100 * 5 + { 506250, 45 }, // 112.5 * 4.5 + { 533500, 55 }, // 97 * 5.5 + { 550000, 55 }, // 100 * 5.5 + { 562500, 50 }, // 112.5 * 5 + { 570000, 60 }, // 95 * 6 + { 600000, 60 }, // 100 * 6 + { 618750, 55 }, // 112.5 * 5.5 + { 660000, 55 }, // 120 * 5.5 + { 675000, 60 }, // 112.5 * 6 + { 720000, 60 }, // 120 * 6 +}; + +#define FREQ_RANGE 3000 /** * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier * - * Returns the current setting of the frequency multiplier. Core clock + * Returns the current setting of the frequency multiplier. Core clock * speed is frequency of the Front-Side Bus multiplied with this value. */ static int powernow_k6_get_cpu_multiplier(void) { - u64 invalue = 0; + unsigned long invalue = 0; u32 msrval; + local_irq_disable(); + msrval = POWERNOW_IOPORT + 0x1; wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ invalue = inl(POWERNOW_IOPORT + 0x8); msrval = POWERNOW_IOPORT + 0x0; wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ - return clock_ratio[(invalue >> 5)&7].driver_data; + local_irq_enable(); + + return clock_ratio[register_to_index[(invalue >> 5)&7]].driver_data; } +static void powernow_k6_set_cpu_multiplier(unsigned int best_i) +{ + unsigned long outvalue, invalue; + unsigned long msrval; + unsigned long cr0; + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + /* + * The processor doesn't respond to inquiry cycles while changing the + * frequency, so we must disable cache. + */ + local_irq_disable(); + cr0 = read_cr0(); + write_cr0(cr0 | X86_CR0_CD); + wbinvd(); + + outvalue = (1<<12) | (1<<10) | (1<<9) | (index_to_register[best_i]<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue = inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0x1f; + outvalue = outvalue | invalue; + outl(outvalue, (POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + write_cr0(cr0); + local_irq_enable(); +} /** * powernow_k6_set_state - set the PowerNow! 
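
Because the clock_ratio[] table above is now sorted by multiplier rather than by register encoding, the two small arrays translate between table positions and the 3-bit BVC field read from the PowerNow port. A quick host-side sketch that checks the two tables are mutual inverses and decodes a sample port value (the 0x2e5 sample is invented):

#include <assert.h>
#include <stdio.h>

/* Copies of the translation tables and of the reordered multiplier column
 * (driver_data, i.e. multiplier x10) from the powernow-k6 patch. */
static const unsigned char index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
static const unsigned char register_to_index[8] = { 3, 2, 4, 1, 7, 6, 0, 5 };
static const unsigned int mult_x10[8] = { 60, 55, 50, 45, 40, 35, 30, 20 };

int main(void)
{
	unsigned long invalue = 0x2e5;	/* made-up PowerNow port read */
	int i;

	/* the two tables must be mutual inverses */
	for (i = 0; i < 8; i++) {
		assert(register_to_index[index_to_register[i]] == i);
		assert(index_to_register[register_to_index[i]] == i);
	}

	/* same decode as powernow_k6_get_cpu_multiplier() */
	i = register_to_index[(invalue >> 5) & 7];
	printf("BVC field %lu -> multiplier %u.%ux\n",
	       (invalue >> 5) & 7, mult_x10[i] / 10, mult_x10[i] % 10);
	return 0;
}
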
multiplier @@ -71,8 +138,6 @@ static int powernow_k6_get_cpu_multiplier(void) static void powernow_k6_set_state(struct cpufreq_policy *policy, unsigned int best_i) { - unsigned long outvalue = 0, invalue = 0; - unsigned long msrval; struct cpufreq_freqs freqs; if (clock_ratio[best_i].driver_data > max_multiplier) { @@ -85,18 +150,7 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy, cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); - /* we now need to transform best_i to the BVC format, see AMD#23446 */ - - outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); - - msrval = POWERNOW_IOPORT + 0x1; - wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ - invalue = inl(POWERNOW_IOPORT + 0x8); - invalue = invalue & 0xf; - outvalue = outvalue | invalue; - outl(outvalue , (POWERNOW_IOPORT + 0x8)); - msrval = POWERNOW_IOPORT + 0x0; - wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + powernow_k6_set_cpu_multiplier(best_i); cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); @@ -141,18 +195,57 @@ static int powernow_k6_target(struct cpufreq_policy *policy, return 0; } - static int powernow_k6_cpu_init(struct cpufreq_policy *policy) { unsigned int i, f; int result; + unsigned khz; if (policy->cpu != 0) return -ENODEV; - /* get frequencies */ - max_multiplier = powernow_k6_get_cpu_multiplier(); - busfreq = cpu_khz / max_multiplier; + max_multiplier = 0; + khz = cpu_khz; + for (i = 0; i < ARRAY_SIZE(usual_frequency_table); i++) { + if (khz >= usual_frequency_table[i].freq - FREQ_RANGE && + khz <= usual_frequency_table[i].freq + FREQ_RANGE) { + khz = usual_frequency_table[i].freq; + max_multiplier = usual_frequency_table[i].mult; + break; + } + } + if (param_max_multiplier) { + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == param_max_multiplier) { + max_multiplier = param_max_multiplier; + goto have_max_multiplier; + } + } + printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); + return -EINVAL; + } + + if (!max_multiplier) { + printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz); + printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n"); + return -EOPNOTSUPP; + } + +have_max_multiplier: + param_max_multiplier = max_multiplier; + + if (param_busfreq) { + if (param_busfreq >= 50000 && param_busfreq <= 150000) { + busfreq = param_busfreq / 10; + goto have_busfreq; + } + printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n"); + return -EINVAL; + } + + busfreq = khz / max_multiplier; +have_busfreq: + param_busfreq = busfreq * 10; /* table init */ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { @@ -164,7 +257,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy) } /* cpuinfo and default policy values */ - policy->cpuinfo.transition_latency = 200000; + policy->cpuinfo.transition_latency = 500000; policy->cur = busfreq * max_multiplier; result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 3ac499d..6e11701 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -331,7 +331,8 @@ struct cpuidle_driver *cpuidle_driver_ref(void) spin_lock(&cpuidle_driver_lock); drv = cpuidle_get_driver(); - drv->refcnt++; + if (drv) + drv->refcnt++; spin_unlock(&cpuidle_driver_lock); return drv; diff --git 
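
The reworked powernow_k6_cpu_init() above no longer derives the bus frequency from the multiplier reported by the chip; it matches the measured cpu_khz against a table of known frequency/multiplier pairs within a +/-FREQ_RANGE window and falls back to module parameters when nothing matches. A small sketch of that matching step; the table here is an abbreviated stand-in for usual_frequency_table and the sample cpu_khz is invented:

#include <stdio.h>

#define FREQ_RANGE 3000	/* +/- 3 MHz window, as in the patch */

static const struct { unsigned int freq; unsigned int mult; } freq_table[] = {
	{ 450000, 45 }, { 500000, 50 }, { 550000, 55 }, { 600000, 60 },
};

int main(void)
{
	unsigned int khz = 551200;	/* invented measured cpu_khz */
	unsigned int max_multiplier = 0, busfreq;
	unsigned int i;

	for (i = 0; i < sizeof(freq_table) / sizeof(freq_table[0]); i++) {
		if (khz >= freq_table[i].freq - FREQ_RANGE &&
		    khz <= freq_table[i].freq + FREQ_RANGE) {
			khz = freq_table[i].freq;	/* snap to the nominal value */
			max_multiplier = freq_table[i].mult;
			break;
		}
	}
	if (!max_multiplier) {
		fprintf(stderr, "unknown frequency, module parameters needed\n");
		return 1;
	}
	busfreq = khz / max_multiplier;	/* 10 kHz units, multiplier is x10 */
	printf("cpu %u kHz -> multiplier %u.%ux, FSB %u kHz\n",
	       khz, max_multiplier / 10, max_multiplier % 10, busfreq * 10);
	return 0;
}
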
a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index 888a81a..0aaea7a 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c @@ -410,7 +410,6 @@ static int cypress_set_input_params(struct input_dev *input, __clear_bit(REL_X, input->relbit); __clear_bit(REL_Y, input->relbit); - __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); __set_bit(EV_KEY, input->evbit); __set_bit(BTN_LEFT, input->keybit); __set_bit(BTN_RIGHT, input->keybit); diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 02125e6..5a4da94 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -518,9 +518,9 @@ static isdnloop_stat isdnloop_cmd_table[] = static void isdnloop_fake_err(isdnloop_card *card) { - char buf[60]; + char buf[64]; - sprintf(buf, "E%s", card->omsg); + snprintf(buf, sizeof(buf), "E%s", card->omsg); isdnloop_fake(card, buf, -1); isdnloop_fake(card, "NAK", -1); } @@ -903,6 +903,8 @@ isdnloop_parse_cmd(isdnloop_card *card) case 7: /* 0x;EAZ */ p += 3; + if (strlen(p) >= sizeof(card->eazlist[0])) + break; strcpy(card->eazlist[ch - 1], p); break; case 8: @@ -1070,6 +1072,12 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp) return -EBUSY; if (copy_from_user((char *) &sdef, (char *) sdefp, sizeof(sdef))) return -EFAULT; + + for (i = 0; i < 3; i++) { + if (!memchr(sdef.num[i], 0, sizeof(sdef.num[i]))) + return -EINVAL; + } + spin_lock_irqsave(&card->isdnloop_lock, flags); switch (sdef.ptype) { case ISDN_PTYPE_EURO: @@ -1127,7 +1135,7 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card) { ulong a; int i; - char cbuf[60]; + char cbuf[80]; isdn_ctrl cmd; isdnloop_cdef cdef; @@ -1192,7 +1200,6 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card) break; if ((c->arg & 255) < ISDNLOOP_BCH) { char *p; - char dial[50]; char dcode[4]; a = c->arg; @@ -1204,10 +1211,10 @@ isdnloop_command(isdn_ctrl *c, isdnloop_card *card) } else /* Normal Dial */ strcpy(dcode, "CAL"); - strcpy(dial, p); - sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), - dcode, dial, c->parm.setup.si1, - c->parm.setup.si2, c->parm.setup.eazmsn); + snprintf(cbuf, sizeof(cbuf), + "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1), + dcode, p, c->parm.setup.si1, + c->parm.setup.si2, c->parm.setup.eazmsn); i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card); } break; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index c200bc2..17822c5 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -17432,8 +17432,6 @@ static int tg3_init_one(struct pci_dev *pdev, tg3_init_bufmgr_config(tp); - features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - /* 5700 B0 chips do not support checksumming correctly due * to hardware bugs. 
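
The isdnloop changes above all deal with untrusted, possibly unterminated strings: buffers grow, sprintf/strcpy become snprintf plus explicit length checks, and ioctl input is rejected unless a NUL is found within the field. A hypothetical userspace sketch of the same defensive pattern (the field size and sample input are made up):

#include <stdio.h>
#include <string.h>

#define EAZ_LEN 11	/* made-up field size, stands in for card->eazlist[ch] */

/* Refuse input that is not NUL-terminated within the field, as the patch
 * does with memchr() before using sdef.num[] and eazlist entries. */
static int copy_field(char dst[EAZ_LEN], const char *src, size_t srclen)
{
	if (!memchr(src, '\0', srclen))
		return -1;			/* unterminated: refuse */
	if (strlen(src) >= EAZ_LEN)
		return -1;			/* would not fit: refuse */
	strcpy(dst, src);
	return 0;
}

int main(void)
{
	char eaz[EAZ_LEN];
	char cbuf[80];
	const char sample[] = "1234567";	/* arbitrary test value */

	if (copy_field(eaz, sample, sizeof(sample)) == 0)
		/* bounded formatting instead of sprintf() into a short buffer */
		snprintf(cbuf, sizeof(cbuf), "%02d;D%s_R%s\n", 1, "CAL", eaz);
	else
		snprintf(cbuf, sizeof(cbuf), "NAK\n");

	fputs(cbuf, stdout);
	return 0;
}
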
*/ @@ -17465,7 +17463,8 @@ static int tg3_init_one(struct pci_dev *pdev, features |= NETIF_F_TSO_ECN; } - dev->features |= features; + dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features |= features; /* diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index f6dce47..3d50e7d 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -727,14 +727,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); // precondition: never called in_interrupt static void usbnet_terminate_urbs(struct usbnet *dev) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); int temp; /* ensure there are no more active urbs */ - add_wait_queue(&unlink_wakeup, &wait); + add_wait_queue(&dev->wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); - dev->wait = &unlink_wakeup; temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); @@ -748,15 +746,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev) "waited for %d urb completions\n", temp); } set_current_state(TASK_RUNNING); - dev->wait = NULL; - remove_wait_queue(&unlink_wakeup, &wait); + remove_wait_queue(&dev->wait, &wait); } int usbnet_stop (struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct driver_info *info = dev->driver_info; - int retval; + int retval, pm; clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); @@ -766,6 +763,8 @@ int usbnet_stop (struct net_device *net) net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); + /* to not race resume */ + pm = usb_autopm_get_interface(dev->intf); /* allow minidriver to stop correctly (wireless devices to turn off * radio etc) */ if (info->stop) { @@ -792,6 +791,9 @@ int usbnet_stop (struct net_device *net) dev->flags = 0; del_timer_sync (&dev->delay); tasklet_kill (&dev->bh); + if (!pm) + usb_autopm_put_interface(dev->intf); + if (info->manage_power && !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) info->manage_power(dev, 0); @@ -1360,11 +1362,12 @@ static void usbnet_bh (unsigned long param) /* restart RX again after disabling due to high error rate */ clear_bit(EVENT_RX_KILL, &dev->flags); - // waiting for all pending urbs to complete? - if (dev->wait) { - if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { - wake_up (dev->wait); - } + /* waiting for all pending urbs to complete? + * only then can we forgo submitting anew + */ + if (waitqueue_active(&dev->wait)) { + if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) + wake_up_all(&dev->wait); // or are we maybe short a few urbs? 
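
The usbnet rework above exists because the wait queue used to live on the stack of usbnet_terminate_urbs(), so the completion path had no reliable way to reach it; embedding it in struct usbnet lets usbnet_bh() test waitqueue_active() and wake_up_all() safely. As a very loose userspace analogue only (fake_dev, completer() and the pthread condition variable are all invented; the kernel uses wait queues, not pthreads), build with cc -pthread:

#include <pthread.h>
#include <stdio.h>

/* The synchronization object is embedded in the shared structure, so the
 * waiting side and the completing side agree on its identity and lifetime. */
struct fake_dev {
	pthread_mutex_t lock;
	pthread_cond_t cond;	/* embedded, not on the waiter's stack */
	int pending;
};

static void *completer(void *arg)
{
	struct fake_dev *dev = arg;

	pthread_mutex_lock(&dev->lock);
	while (dev->pending) {
		dev->pending--;
		pthread_cond_broadcast(&dev->cond);	/* wake_up_all() analogue */
	}
	pthread_mutex_unlock(&dev->lock);
	return NULL;
}

int main(void)
{
	struct fake_dev dev = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 3
	};
	pthread_t t;

	pthread_create(&t, NULL, completer, &dev);

	pthread_mutex_lock(&dev.lock);
	while (dev.pending)			/* wait for all "urbs" to finish */
		pthread_cond_wait(&dev.cond, &dev.lock);
	pthread_mutex_unlock(&dev.lock);
	pthread_join(t, NULL);

	puts("all pending work drained");
	return 0;
}
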
} else if (netif_running (dev->net) && @@ -1502,6 +1505,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->driver_name = name; dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); + init_waitqueue_head(&dev->wait); skb_queue_head_init (&dev->rxq); skb_queue_head_init (&dev->txq); skb_queue_head_init (&dev->done); @@ -1694,9 +1698,10 @@ int usbnet_resume (struct usb_interface *intf) spin_unlock_irq(&dev->txq.lock); if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { - /* handle remote wakeup ASAP */ - if (!dev->wait && - netif_device_present(dev->net) && + /* handle remote wakeup ASAP + * we cannot race against stop + */ + if (netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) rx_alloc_submit(dev, GFP_NOIO); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8a05d77..116810f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -978,6 +978,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) neigh_release(n); + if (reply == NULL) + goto out; + skb_reset_mac_header(reply); __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index ac55997..3335983 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -354,8 +354,8 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head) * into multiple copies tend to give large frags their * own buffers as before. */ - if ((offset + size > MAX_BUFFER_OFFSET) && - (size <= MAX_BUFFER_OFFSET) && offset && !head) + BUG_ON(size > MAX_BUFFER_OFFSET); + if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head) return true; return false; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 969a859..786b86d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -510,9 +510,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, r = -ENOBUFS; goto err; } - d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, + r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, ARRAY_SIZE(vq->iov) - seg, &out, &in, log, log_num); + if (unlikely(r < 0)) + goto err; + + d = r; if (d == vq->num) { r = 0; goto err; @@ -537,6 +541,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, *iovcount = seg; if (unlikely(log)) *log_num = nlogs; + + /* Detect overrun */ + if (unlikely(datalen > 0)) { + r = UIO_MAXIOV + 1; + goto err; + } return headcount; err: vhost_discard_vq_desc(vq, headcount); @@ -592,6 +602,14 @@ static void handle_rx(struct vhost_net *net) /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) break; + /* On overrun, truncate and discard */ + if (unlikely(headcount > UIO_MAXIOV)) { + msg.msg_iovlen = 1; + err = sock->ops->recvmsg(NULL, sock, &msg, + 1, MSG_DONTWAIT | MSG_TRUNC); + pr_debug("Discarded rx packet: len %zd\n", sock_len); + continue; + } /* OK, now we need to know about added descriptors. 
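
In the vhost hunks above, get_rx_bufs() signals "the descriptors cannot hold this packet" by returning UIO_MAXIOV + 1 instead of an error, and handle_rx() then reads one byte with MSG_TRUNC to drop the oversized packet rather than stalling the queue. A much simplified model of that control flow; every constant and name below is invented:

#include <stdio.h>

#define MAX_BUFS  8		/* stands in for UIO_MAXIOV, tiny on purpose */
#define BUF_SIZE  256		/* invented per-descriptor capacity */

/* Returns the number of buffers consumed, or MAX_BUFS + 1 when datalen
 * cannot fit: the caller is expected to truncate and drop the packet,
 * mirroring the MSG_TRUNC discard added to handle_rx(). */
static int gather_bufs(long datalen)
{
	int used = 0;

	while (datalen > 0) {
		if (used == MAX_BUFS)
			return MAX_BUFS + 1;	/* overrun: signal it, do not fail */
		datalen -= BUF_SIZE;
		used++;
	}
	return used;
}

int main(void)
{
	long pkt_lens[] = { 100, 1000, 4000 };	/* example packet sizes */

	for (unsigned i = 0; i < sizeof(pkt_lens) / sizeof(pkt_lens[0]); i++) {
		int n = gather_bufs(pkt_lens[i]);

		if (n > MAX_BUFS)
			printf("len %ld: overrun, truncate and discard\n", pkt_lens[i]);
		else
			printf("len %ld: fits in %d buffer(s)\n", pkt_lens[i], n);
	}
	return 0;
}
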
*/ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { diff --git a/include/linux/futex.h b/include/linux/futex.h index b0d95ca..6435f46 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h @@ -55,7 +55,11 @@ union futex_key { #ifdef CONFIG_FUTEX extern void exit_robust_list(struct task_struct *curr); extern void exit_pi_state_list(struct task_struct *curr); +#ifdef CONFIG_HAVE_FUTEX_CMPXCHG +#define futex_cmpxchg_enabled 1 +#else extern int futex_cmpxchg_enabled; +#endif #else static inline void exit_robust_list(struct task_struct *curr) { diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index f18d641..123b21b 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -30,7 +30,7 @@ struct usbnet { struct driver_info *driver_info; const char *driver_name; void *driver_priv; - wait_queue_head_t *wait; + wait_queue_head_t wait; struct mutex phy_mutex; unsigned char suspend_count; unsigned char pkt_cnt, pkt_err; diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index 3313108..86dd7dd 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -41,8 +41,8 @@ enum nf_ct_ext_id { /* Extensions: optional stuff which isn't permanently in struct. */ struct nf_ct_ext { struct rcu_head rcu; - u8 offset[NF_CT_EXT_NUM]; - u8 len; + u16 offset[NF_CT_EXT_NUM]; + u16 len; char data[0]; }; diff --git a/include/net/sock.h b/include/net/sock.h index 04e148f..643f92e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1443,6 +1443,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) */ #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) +static inline void sock_release_ownership(struct sock *sk) +{ + sk->sk_lock.owned = 0; +} + /* * Macro so as to not evaluate some arguments when * lockdep is not enabled. diff --git a/init/Kconfig b/init/Kconfig index fed81b5..390ff19 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1402,6 +1402,13 @@ config FUTEX support for "fast userspace mutexes". The resulting kernel may not run glibc-based applications correctly. +config HAVE_FUTEX_CMPXCHG + bool + help + Architectures should select this if futex_atomic_cmpxchg_inatomic() + is implemented and always working. This removes a couple of runtime + checks. + config EPOLL bool "Enable eventpoll support" if EXPERT default y diff --git a/kernel/futex.c b/kernel/futex.c index 221a58f..15a7d724 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -68,7 +68,9 @@ #include "rtmutex_common.h" +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG int __read_mostly futex_cmpxchg_enabled; +#endif #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) @@ -2730,10 +2732,10 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } -static int __init futex_init(void) +static void __init futex_detect_cmpxchg(void) { +#ifndef CONFIG_HAVE_FUTEX_CMPXCHG u32 curval; - int i; /* * This will fail and we want it. 
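
The futex pieces above let an architecture state at build time that futex_atomic_cmpxchg_inatomic() always works: under CONFIG_HAVE_FUTEX_CMPXCHG the flag becomes the literal 1, so the runtime probe in futex_init() compiles away and the flag checks fold to constants. A tiny sketch of that pattern; HAVE_WORKING_CMPXCHG and detect_cmpxchg() are invented stand-ins:

#include <stdio.h>

/* Build with -DHAVE_WORKING_CMPXCHG to emulate an arch selecting
 * HAVE_FUTEX_CMPXCHG; otherwise the flag is probed once at startup. */
#ifdef HAVE_WORKING_CMPXCHG
#define cmpxchg_enabled 1		/* compile-time constant */
#else
static int cmpxchg_enabled;

static void detect_cmpxchg(void)
{
	/* stand-in for the kernel probe, which calls
	 * cmpxchg_futex_value_locked() on a NULL address and expects -EFAULT */
	cmpxchg_enabled = 1;
}
#endif

static void init(void)
{
#ifndef HAVE_WORKING_CMPXCHG
	detect_cmpxchg();
#endif
}

int main(void)
{
	init();
	if (cmpxchg_enabled)
		puts("PI futexes and robust lists available");
	else
		puts("PI futex and robust-list calls would return -ENOSYS");
	return 0;
}
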
Some arch implementations do @@ -2747,6 +2749,14 @@ static int __init futex_init(void) */ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) futex_cmpxchg_enabled = 1; +#endif +} + +static int __init futex_init(void) +{ + int i; + + futex_detect_cmpxchg(); for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { plist_head_init(&futex_queues[i].chain); diff --git a/lib/nlattr.c b/lib/nlattr.c index 18eca78..fc67547 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -303,9 +303,15 @@ int nla_memcmp(const struct nlattr *nla, const void *data, */ int nla_strcmp(const struct nlattr *nla, const char *str) { - int len = strlen(str) + 1; - int d = nla_len(nla) - len; + int len = strlen(str); + char *buf = nla_data(nla); + int attrlen = nla_len(nla); + int d; + if (attrlen > 0 && buf[attrlen - 1] == '\0') + attrlen--; + + d = attrlen - len; if (d == 0) d = memcmp(nla_data(nla), str, len); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 2fb2d88..2c3a3b7 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -305,9 +305,11 @@ static void vlan_sync_address(struct net_device *dev, static void vlan_transfer_features(struct net_device *dev, struct net_device *vlandev) { + struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); + vlandev->gso_max_size = dev->gso_max_size; - if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) + if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; else vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 4af64af..cf35f38 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -557,6 +557,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; + if (saddr == NULL) + saddr = dev->dev_addr; + return dev_hard_header(skb, real_dev, type, daddr, saddr, len); } @@ -608,7 +611,8 @@ static int vlan_dev_init(struct net_device *dev) #endif dev->needed_headroom = real_dev->needed_headroom; - if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { + if (vlan_hw_offload_capable(real_dev->features, + vlan_dev_priv(dev)->vlan_proto)) { dev->header_ops = &vlan_passthru_header_ops; dev->hard_header_len = real_dev->hard_header_len; } else { diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index b02fb47..29ff042 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1237,6 +1237,12 @@ static int br_ip6_multicast_query(struct net_bridge *br, (port && port->state == BR_STATE_DISABLED)) goto out; + /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */ + if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) { + err = -EINVAL; + goto out; + } + if (skb->len == sizeof(*mld)) { if (!pskb_may_pull(skb, sizeof(*mld))) { err = -EINVAL; diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index ac78024..b166fc2 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1044,10 +1044,9 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, if (repl->num_counters && copy_to_user(repl->counters, counterstmp, repl->num_counters * sizeof(struct ebt_counter))) { - ret = -EFAULT; + /* Silent error, can't fail, new table is already in place */ + net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); } - else - ret = 0; /* decrease module count and free resources */ 
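
The nla_strcmp() fix above copes with netlink string attributes that may arrive with or without a trailing NUL: the attribute length is trimmed by one when the last byte is a NUL and only then compared against strlen(str). A userspace re-implementation of just that comparison; attr_strcmp() is a hypothetical name:

#include <stdio.h>
#include <string.h>

/* Compare a length-delimited attribute payload, which may or may not carry
 * a trailing NUL, against a C string, using the patched nla_strcmp() logic. */
static int attr_strcmp(const char *buf, int attrlen, const char *str)
{
	int len = strlen(str);
	int d;

	if (attrlen > 0 && buf[attrlen - 1] == '\0')
		attrlen--;

	d = attrlen - len;
	if (d == 0)
		d = memcmp(buf, str, len);
	return d;
}

int main(void)
{
	const char terminated[] = { 'e', 't', 'h', '0', '\0' };
	const char unterminated[] = { 'e', 't', 'h', '0' };

	printf("%d %d %d\n",
	       attr_strcmp(terminated, sizeof(terminated), "eth0"),     /* 0 */
	       attr_strcmp(unterminated, sizeof(unterminated), "eth0"), /* 0 */
	       attr_strcmp(terminated, sizeof(terminated), "eth1"));    /* != 0 */
	return 0;
}
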
EBT_ENTRY_ITERATE(table->entries, table->entries_size, diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 462cdc9..9b40f23 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -740,7 +740,7 @@ static bool pkt_is_ns(struct sk_buff *skb) struct nd_msg *msg; struct ipv6hdr *hdr; - if (skb->protocol != htons(ETH_P_ARP)) + if (skb->protocol != htons(ETH_P_IPV6)) return false; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) return false; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ca198c1d1..6f73a10 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1991,12 +1991,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo); static int nlmsg_populate_fdb_fill(struct sk_buff *skb, struct net_device *dev, u8 *addr, u32 pid, u32 seq, - int type, unsigned int flags) + int type, unsigned int flags, + int nlflags) { struct nlmsghdr *nlh; struct ndmsg *ndm; - nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); if (!nlh) return -EMSGSIZE; @@ -2034,7 +2035,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) if (!skb) goto errout; - err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); + err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0); if (err < 0) { kfree_skb(skb); goto errout; @@ -2259,7 +2260,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, portid, seq, - RTM_NEWNEIGH, NTF_SELF); + RTM_NEWNEIGH, NTF_SELF, + NLM_F_MULTI); if (err < 0) return err; skip: diff --git a/net/core/sock.c b/net/core/sock.c index 2d92ff8..0e2b67e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2335,10 +2335,13 @@ void release_sock(struct sock *sk) if (sk->sk_backlog.tail) __release_sock(sk); + /* Warning : release_cb() might need to release sk ownership, + * ie call sock_release_ownership(sk) before us. 
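
The netpoll fix above is a one-liner, but the rule it restores is worth spelling out: a neighbour-solicitation check must verify the IPv6 EtherType (0x86DD), not ETH_P_ARP, before it reads an IPv6 header. A minimal illustration on a raw frame buffer with fabricated contents:

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IPV6 0x86DD
#define ETH_HLEN   14

/* Look at the EtherType field before assuming an IPv6 header follows. */
static int frame_is_ipv6(const uint8_t *frame, size_t len)
{
	uint16_t proto;

	if (len < ETH_HLEN + 40)	/* Ethernet header + fixed IPv6 header */
		return 0;
	proto = (uint16_t)(frame[12] << 8 | frame[13]);
	return proto == ETH_P_IPV6;
}

int main(void)
{
	uint8_t frame[ETH_HLEN + 40] = { 0 };	/* fabricated frame */

	frame[12] = 0x86;
	frame[13] = 0xDD;
	printf("ipv6? %d\n", frame_is_ipv6(frame, sizeof(frame)));

	frame[12] = 0x08;			/* 0x0806 = ARP */
	frame[13] = 0x06;
	printf("ipv6? %d\n", frame_is_ipv6(frame, sizeof(frame)));
	return 0;
}
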
+ */ if (sk->sk_prot->release_cb) sk->sk_prot->release_cb(sk); - sk->sk_lock.owned = 0; + sock_release_ownership(sk); if (waitqueue_active(&sk->sk_lock.wq)) wake_up(&sk->sk_lock.wq); spin_unlock_bh(&sk->sk_lock.slock); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index c5313a9..12b80fb 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -211,7 +211,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force) } work = frag_mem_limit(nf) - nf->low_thresh; - while (work > 0) { + while (work > 0 || force) { spin_lock(&nf->lru_lock); if (list_empty(&nf->lru_list)) { @@ -281,9 +281,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, atomic_inc(&qp->refcnt); hlist_add_head(&qp->list, &hb->chain); + inet_frag_lru_add(nf, qp); spin_unlock(&hb->chain_lock); read_unlock(&f->lock); - inet_frag_lru_add(nf, qp); + return qp; } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 6fabc8d..aaa1be5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, } static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, - u32 portid, u32 seq, struct mfc_cache *c, int cmd) + u32 portid, u32 seq, struct mfc_cache *c, int cmd, + int flags) { struct nlmsghdr *nlh; struct rtmsg *rtm; int err; - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, if (skb == NULL) goto errout; - err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); + err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); if (err < 0) goto errout; @@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) goto done; next_entry: e++; @@ -2382,7 +2384,8 @@ next_entry: if (ipmr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) { + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) { spin_unlock_bh(&mfc_unres_lock); goto done; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 85a4f21..c8abe31 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1039,8 +1039,10 @@ static int __do_replace(struct net *net, const char *name, xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, - sizeof(struct xt_counters) * num_counters) != 0) - ret = -EFAULT; + sizeof(struct xt_counters) * num_counters) != 0) { + /* Silent error, can't fail, new table is already in place */ + net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n"); + } vfree(counters); xt_table_unlock(t); return ret; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d23118d..651c107 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1226,8 +1226,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, - sizeof(struct xt_counters) * num_counters) != 0) - ret = -EFAULT; + sizeof(struct xt_counters) * num_counters) != 0) { + /* Silent error, can't fail, new table is already in place */ + net_warn_ratelimited("iptables: counters copy to user 
failed while replacing table\n"); + } vfree(counters); xt_table_unlock(t); return ret; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 24b88d5..acc001e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -760,6 +760,17 @@ void tcp_release_cb(struct sock *sk) if (flags & (1UL << TCP_TSQ_DEFERRED)) tcp_tsq_handler(sk); + /* Here begins the tricky part : + * We are called from release_sock() with : + * 1) BH disabled + * 2) sk_lock.slock spinlock held + * 3) socket owned by us (sk->sk_lock.owned == 1) + * + * But following code is meant to be called from BH handlers, + * so we should keep BH disabled, but early release socket ownership + */ + sock_release_ownership(sk); + if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { tcp_write_timer_handler(sk); __sock_put(sk); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0f99f7b..a6ddced 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1113,8 +1113,11 @@ retry: * Lifetime is greater than REGEN_ADVANCE time units. In particular, * an implementation must not create a temporary address with a zero * Preferred Lifetime. + * Use age calculation as in addrconf_verify to avoid unnecessary + * temporary addresses being generated. */ - if (tmp_prefered_lft <= regen_advance) { + age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + if (tmp_prefered_lft <= regen_advance + age) { in6_ifa_put(ifp); in6_dev_put(idev); ret = -1; diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c index cf77f3a..447a7fb 100644 --- a/net/ipv6/exthdrs_offload.c +++ b/net/ipv6/exthdrs_offload.c @@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void) int ret; ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); - if (!ret) + if (ret) goto out; ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); - if (!ret) + if (ret) goto out_rt; out: diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 7cfc8d2..eea0e24 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -516,7 +516,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) np->tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, np->dontfrag); if (err) { - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 554eb0d..d18c00a 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1110,21 +1110,19 @@ static void ip6_append_data_mtu(unsigned int *mtu, unsigned int fragheaderlen, struct sk_buff *skb, struct rt6_info *rt, - bool pmtuprobe) + unsigned int orig_mtu) { if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { if (skb == NULL) { /* first fragment, reserve header_len */ - *mtu = *mtu - rt->dst.header_len; + *mtu = orig_mtu - rt->dst.header_len; } else { /* * this fragment is not first, the headers * space is regarded as data space. */ - *mtu = min(*mtu, pmtuprobe ? 
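
The exthdrs_offload fix above is a classic inverted error check: inet6_add_offload() returns non-zero on failure, so the cleanup must run when ret is set, not when it is zero. A self-contained sketch of the register-two-things-with-rollback shape the corrected code follows; register_a() and register_b() are invented helpers:

#include <stdio.h>

/* Invented registration helpers: return 0 on success, negative on error. */
static int register_a(void) { return 0; }
static int register_b(void) { return -1; }	/* force the unwind path */
static void unregister_a(void) { puts("rolled back a"); }

static int init_offloads(void)
{
	int ret;

	ret = register_a();
	if (ret)			/* correct: bail out on failure */
		goto out;

	ret = register_b();
	if (ret)
		goto out_a;		/* undo the first step before returning */

	return 0;

out_a:
	unregister_a();
out:
	return ret;
}

int main(void)
{
	printf("init_offloads() = %d\n", init_offloads());
	return 0;
}
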
- rt->dst.dev->mtu : - dst_mtu(rt->dst.path)); + *mtu = orig_mtu; } *maxfraglen = ((*mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); @@ -1141,7 +1139,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen, mtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; int exthdrlen; int dst_exthdrlen; int hh_len; @@ -1223,6 +1221,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, dst_exthdrlen = 0; mtu = cork->fragsize; } + orig_mtu = mtu; hh_len = LL_RESERVED_SPACE(rt->dst.dev); @@ -1302,8 +1301,7 @@ alloc_new_skb: if (skb == NULL || skb_prev == NULL) ip6_append_data_mtu(&mtu, &maxfraglen, fragheaderlen, skb, rt, - np->pmtudisc == - IPV6_PMTUDISC_PROBE); + orig_mtu); skb_prev = skb; @@ -1559,8 +1557,8 @@ int ip6_push_pending_frames(struct sock *sk) if (proto == IPPROTO_ICMPV6) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); - ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type); - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); + ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type); + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); } err = ip6_local_out(skb); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0caf013..1292075 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2351,13 +2351,14 @@ int ip6mr_get_route(struct net *net, } static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, - u32 portid, u32 seq, struct mfc6_cache *c, int cmd) + u32 portid, u32 seq, struct mfc6_cache *c, int cmd, + int flags) { struct nlmsghdr *nlh; struct rtmsg *rtm; int err; - nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); if (nlh == NULL) return -EMSGSIZE; @@ -2425,7 +2426,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, if (skb == NULL) goto errout; - err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); + err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); if (err < 0) goto errout; @@ -2464,7 +2465,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (ip6mr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) goto done; next_entry: e++; @@ -2478,7 +2480,8 @@ next_entry: if (ip6mr_fill_mroute(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - mfc, RTM_NEWROUTE) < 0) { + mfc, RTM_NEWROUTE, + NLM_F_MULTI) < 0) { spin_unlock_bh(&mfc_unres_lock); goto done; } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6b84fc0..8e30bed 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1448,11 +1448,12 @@ static void mld_sendpack(struct sk_buff *skb) dst_output); out: if (!err) { - ICMP6MSGOUT_INC_STATS_BH(net, idev, ICMPV6_MLD2_REPORT); - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); - IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_OUTMCAST, payload_len); - } else - IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); + ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT); + ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS); + IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len); + } else { + IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); + } rcu_read_unlock(); return; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 44400c2..89a4e4d 100644 --- 
a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1236,8 +1236,10 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, - sizeof(struct xt_counters) * num_counters) != 0) - ret = -EFAULT; + sizeof(struct xt_counters) * num_counters) != 0) { + /* Silent error, can't fail, new table is already in place */ + net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); + } vfree(counters); xt_table_unlock(t); return ret; diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 7856e96..6acab0b 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -182,8 +182,8 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, MSG_DONTWAIT, np->dontfrag); if (err) { - ICMP6_INC_STATS_BH(sock_net(sk), rt->rt6i_idev, - ICMP6_MIB_OUTERRORS); + ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, + ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { err = icmpv6_push_pending_frames(sk, &fl6, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2a0f219..bf7a99f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1451,7 +1451,7 @@ int ip6_route_add(struct fib6_config *cfg) if (!table) goto out; - rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); + rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); if (!rt) { err = -ENOMEM; diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index a99b6c3..59359be 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb, const char *msg; u_int8_t state; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh); + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE]; @@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, u_int8_t type, old_state, new_state; enum ct_dccp_roles role; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh); + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); BUG_ON(dh == NULL); type = dh->dccph_type; @@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl, unsigned int cscov; const char *msg; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh); + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); if (dh == NULL) { msg = "nf_ct_dccp: short packet "; goto out_invalid; diff --git a/net/rds/iw.c b/net/rds/iw.c index 7826d46..5899356 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c @@ -239,7 +239,8 @@ static int rds_iw_laddr_check(__be32 addr) ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); /* due to this, we will claim to support IB devices unless we check node_type. */ - if (ret || cm_id->device->node_type != RDMA_NODE_RNIC) + if (ret || !cm_id->device || + cm_id->device->node_type != RDMA_NODE_RNIC) ret = -EADDRNOTAVAIL; rdsdebug("addr %pI4 ret %d node type %d\n", diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 362ae6e..da85571 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1400,8 +1400,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk) BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); - /* Free the chunk skb data and the SCTP_chunk stub itself. 
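
The dccp conntrack fix above passes the local header buffer (&_dh) rather than the address of the result pointer (&dh), which the old code was silently overwriting. A hypothetical header_pointer() helper, simplified to always copy, makes the difference easy to see; the packet bytes are arbitrary:

#include <stdio.h>
#include <string.h>

/* Simplified analogue of skb_header_pointer(): return a pointer to len
 * bytes at offset, copying them into buffer (here, always copying). */
static const void *header_pointer(const void *pkt, size_t pktlen,
				  size_t offset, size_t len, void *buffer)
{
	if (offset + len > pktlen)
		return NULL;
	memcpy(buffer, (const char *)pkt + offset, len);
	return buffer;
}

struct dccp_hdr_model { unsigned char type; unsigned char pad[3]; };

int main(void)
{
	unsigned char pkt[16] = { 0x07, 0x01, 0x02, 0x03 };	/* arbitrary bytes */
	struct dccp_hdr_model _dh;
	const struct dccp_hdr_model *dh;

	/* Correct: hand over the storage (&_dh), not the pointer's own address
	 * (&dh), which is the one-character bug the patch fixes in three places. */
	dh = header_pointer(pkt, sizeof(pkt), 0, sizeof(_dh), &_dh);
	if (dh)
		printf("type = %u\n", dh->type);
	return 0;
}
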
*/ - dev_kfree_skb(chunk->skb); + consume_skb(chunk->skb); + consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7f59623..261c730 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -767,7 +767,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, /* Make sure that we and the peer are AUTH capable */ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { - kfree_skb(chunk->auth_chunk); sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } @@ -782,10 +781,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, auth.transport = chunk->transport; ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); - - /* We can now safely free the auth_chunk clone */ - kfree_skb(chunk->auth_chunk); - if (ret != SCTP_IERROR_NO_ERROR) { sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); diff --git a/net/socket.c b/net/socket.c index 03d5324..6bfb9e4 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1981,6 +1981,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, { if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) return -EFAULT; + + if (kmsg->msg_namelen < 0) + return -EINVAL; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); return 0; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2ea3310..c80c107 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1793,8 +1793,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, goto out; err = mutex_lock_interruptible(&u->readlock); - if (err) { - err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); + if (unlikely(err)) { + /* recvmsg() in non blocking mode is supposed to return -EAGAIN + * sk_rcvtimeo is not honored by mutex_lock_interruptible() + */ + err = noblock ? -EAGAIN : -ERESTARTSYS; goto out; } @@ -1914,6 +1917,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = msg->msg_name; int copied = 0; + int noblock = flags & MSG_DONTWAIT; int check_creds = 0; int target; int err = 0; @@ -1929,7 +1933,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, goto out; target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); - timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); + timeo = sock_rcvtimeo(sk, noblock); /* Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg @@ -1941,8 +1945,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, } err = mutex_lock_interruptible(&u->readlock); - if (err) { - err = sock_intr_errno(timeo); + if (unlikely(err)) { + /* recvmsg() in non blocking mode is supposed to return -EAGAIN + * sk_rcvtimeo is not honored by mutex_lock_interruptible() + */ + err = noblock ? 
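
The copy_msghdr_from_user() hardening above treats msg_namelen as untrusted input: a negative value is rejected and anything larger than sockaddr_storage is clamped. A freestanding sketch of the same validation, with MAX_NAMELEN standing in for sizeof(struct sockaddr_storage):

#include <stdio.h>

#define MAX_NAMELEN 128		/* stand-in for sizeof(struct sockaddr_storage) */

/* Validate a length copied from userspace: negative is an error,
 * oversized is clamped, as the patched copy_msghdr_from_user() does. */
static int check_namelen(int *namelen)
{
	if (*namelen < 0)
		return -1;		/* -EINVAL in the kernel */
	if (*namelen > MAX_NAMELEN)
		*namelen = MAX_NAMELEN;
	return 0;
}

int main(void)
{
	int values[] = { 16, 4096, -1 };

	for (unsigned i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		int v = values[i];
		int rc = check_namelen(&v);

		printf("in %d -> rc %d, namelen %d\n", values[i], rc, v);
	}
	return 0;
}
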
-EAGAIN : -ERESTARTSYS; goto out; } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index bf64893..021765e 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1386,15 +1386,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent isec->sid = sbsec->sid; if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) { - if (opt_dentry) { - isec->sclass = inode_mode_to_security_class(inode->i_mode); - rc = selinux_proc_get_sid(opt_dentry, - isec->sclass, - &sid); - if (rc) - goto out_unlock; - isec->sid = sid; - } + /* We must have a dentry to determine the label on + * procfs inodes */ + if (opt_dentry) + /* Called from d_instantiate or + * d_splice_alias. */ + dentry = dget(opt_dentry); + else + /* Called from selinux_complete_init, try to + * find a dentry. */ + dentry = d_find_alias(inode); + /* + * This can be hit on boot when a file is accessed + * before the policy is loaded. When we load policy we + * may find inodes that have no dentry on the + * sbsec->isec_head list. No reason to complain as + * these will get fixed up the next time we go through + * inode_doinit() with a dentry, before these inodes + * could be used again by userspace. + */ + if (!dentry) + goto out_unlock; + isec->sclass = inode_mode_to_security_class(inode->i_mode); + rc = selinux_proc_get_sid(dentry, isec->sclass, &sid); + dput(dentry); + if (rc) + goto out_unlock; + isec->sid = sid; } break; }
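
A closing note on the SELinux hunk: whichever way the dentry is obtained, dget() on the one passed in or d_find_alias() on the inode, exactly one reference is held and a single dput() releases it before the error check. A generic userspace sketch of that take-from-either-source, release-once shape; the object type and helpers are invented:

#include <stdio.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o)        { o->refs++; return o; }
static void obj_put(struct obj *o)               { o->refs--; }
static struct obj *find_alias(struct obj *cache) { return cache ? obj_get(cache) : NULL; }

/* Take a reference from whichever source is available, use the object,
 * and drop that one reference at the single release point. */
static int label_object(struct obj *passed_in, struct obj *cache)
{
	struct obj *o;

	if (passed_in)
		o = obj_get(passed_in);		/* caller handed one in */
	else
		o = find_alias(cache);		/* go looking for one */
	if (!o)
		return 0;			/* nothing to label yet */

	/* ... compute the label using o ... */
	obj_put(o);				/* single release point */
	return 0;
}

int main(void)
{
	struct obj a = { 1 }, b = { 1 };

	label_object(&a, NULL);
	label_object(NULL, &b);
	printf("refs: a=%d b=%d\n", a.refs, b.refs);	/* both back to 1 */
	return 0;
}
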