diff --git a/Makefile b/Makefile
index 274ec43fc6ba..f44ed53ab175 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 18
-SUBLEVEL = 53
+SUBLEVEL = 54
 EXTRAVERSION =
 NAME = Diseased Newt
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index aa29ecb4f800..78a5894b1621 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -316,22 +316,31 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
-	       struct perf_event *event)
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
@@ -349,15 +358,15 @@ validate_group(struct perf_event *event)
 	memset(fake_used_mask, 0, sizeof(fake_used_mask));
 	fake_pmu.used_mask = fake_used_mask;
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 3fa98ff14f0e..df20b7918854 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -50,7 +50,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
  * The sys_call_table array must be 4K aligned to be accessible from
  * kernel/entry.S.
  */
-void *sys_call_table[__NR_syscalls] __aligned(4096) = {
+void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
 };
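Editor's note: the arm64 perf change above rejects event groups that span more than one hardware PMU (e.g. CPU + CCI), which the core perf code would otherwise only catch after event_init(). A minimal userspace sketch of the idea — types and names here are illustrative, not the kernel's:

#include <stdio.h>

struct pmu { const char *name; };
struct event { struct pmu *pmu; struct event *leader; };

/* Reject any group member whose PMU differs from the PMU being scheduled. */
static int validate_event(struct pmu *pmu, struct event *ev)
{
	if (ev->pmu != pmu)
		return 0;	/* group spans multiple PMUs: invalid */
	return 1;
}

int main(void)
{
	struct pmu cpu_pmu = { "cpu" }, cci_pmu = { "cci" };
	struct event leader = { &cpu_pmu, NULL };
	struct event sibling = { &cci_pmu, &leader };

	printf("leader ok: %d\n", validate_event(leader.pmu, &leader));
	printf("sibling ok: %d\n", validate_event(leader.pmu, &sibling));
	return 0;
}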
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index bd49ec61255c..f673f92596fa 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
 #ifndef BOOT_BOOT_H
 #define BOOT_BOOT_H
 
-#define STACK_SIZE	512	/* Minimum number of bytes for stack */
+#define STACK_SIZE	1024	/* Minimum number of bytes for stack */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 3b52bf0b418a..b0d25e6ef464 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -120,7 +120,7 @@ int poke_user(struct task_struct *child, long addr, long data)
	else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
		(addr <= offsetof(struct user, u_debugreg[7]))) {
		addr -= offsetof(struct user, u_debugreg[0]);
-		addr = addr >> 2;
+		addr = addr >> 3;
		if ((addr == 4) || (addr == 5))
			return -EIO;
		child->thread.arch.debugregs[addr] = data;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 19a3a12f3257..34dda44cb910 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1213,6 +1213,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
		/* Ignore report if ErrorRollOver */
		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
		    value[n] >= min && value[n] <= max &&
+		    value[n] - min < field->maxusage &&
		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
			goto exit;
	}
@@ -1225,11 +1226,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
		}
 
		if (field->value[n] >= min && field->value[n] <= max
+			&& field->value[n] - min < field->maxusage
			&& field->usage[field->value[n] - min].hid
			&& search(value, field->value[n], count))
				hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
 
		if (value[n] >= min && value[n] <= max
+			&& value[n] - min < field->maxusage
			&& field->usage[value[n] - min].hid
			&& search(field->value, value[n], count))
				hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3b619b10a372..420ae23d064d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2357,6 +2357,7 @@ err_counter:
		mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
+	mlx4_ib_free_eqs(dev, ibdev);
	iounmap(ibdev->uar_map);
 
 err_uar:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740e2691..09396bd7b02d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,11 @@ void ipoib_delete_debug_files(struct net_device *dev)
 {
	struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+	WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
+	WARN_ONCE(!priv->path_dentry, "null path debug file\n");
	debugfs_remove(priv->mcg_dentry);
	debugfs_remove(priv->path_dentry);
+	priv->mcg_dentry = priv->path_dentry = NULL;
 }
 
 int ipoib_register_debugfs(void)
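Editor's note: the hid-core fix above adds a bounds check so that value[n] - min can never index past the field->usage[] array, even when a device reports a logical range larger than its usage table. The pattern, reduced to its essentials — array size and names are made up:

#include <stdio.h>

#define MAXUSAGE 4

static int usage[MAXUSAGE] = { 10, 20, 30, 40 };

/* Return the usage for a report value, or -1 if out of range. */
static int lookup_usage(int value, int min, int max)
{
	if (value < min || value > max)
		return -1;
	if (value - min >= MAXUSAGE)	/* the added check: clamp to the array */
		return -1;
	return usage[value - min];
}

int main(void)
{
	printf("%d\n", lookup_usage(2, 1, 8));	/* in range, inside the array */
	printf("%d\n", lookup_usage(7, 1, 8));	/* in range, past the array: rejected */
	return 0;
}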
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 483ddbd1bdc8..5e08db6f9d8c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -98,6 +98,33 @@ static struct ib_client ipoib_client = {
	.remove = ipoib_remove_one
 };
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct netdev_notifier_info *ni = ptr;
+	struct net_device *dev = ni->dev;
+
+	if (dev->netdev_ops->ndo_open != ipoib_open)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_CHANGENAME:
+		ipoib_delete_debug_files(dev);
+		ipoib_create_debug_files(dev);
+		break;
+	case NETDEV_UNREGISTER:
+		ipoib_delete_debug_files(dev);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+#endif
+
 int ipoib_open(struct net_device *dev)
 {
	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1304,8 +1331,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
	ASSERT_RTNL();
 
-	ipoib_delete_debug_files(dev);
-
	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
@@ -1610,8 +1635,6 @@ static struct net_device *ipoib_add_port(const char *format,
		goto register_failed;
	}
 
-	ipoib_create_debug_files(priv->dev);
-
	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
@@ -1626,7 +1649,6 @@ static struct net_device *ipoib_add_port(const char *format,
	return priv->dev;
 
 sysfs_failed:
-	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);
 
 register_failed:
@@ -1714,6 +1736,12 @@ static void ipoib_remove_one(struct ib_device *device)
	kfree(dev_list);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+	.notifier_call = ipoib_netdev_event,
+};
+#endif
+
 static int __init ipoib_init_module(void)
 {
	int ret;
@@ -1763,6 +1791,9 @@ static int __init ipoib_init_module(void)
	if (ret)
		goto err_client;
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
	return 0;
 
 err_client:
@@ -1780,6 +1811,9 @@ err_fs:
 
 static void __exit ipoib_cleanup_module(void)
 {
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+	unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9fad7b5ac8b9..c995681befee 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -86,8 +86,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 
	priv->parent = ppriv->dev;
 
-	ipoib_create_debug_files(priv->dev);
-
	/* RTNL childs don't need proprietary sysfs entries */
	if (type == IPOIB_LEGACY_CHILD) {
		if (ipoib_cm_add_mode_attr(priv->dev))
@@ -109,7 +107,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
 
 sysfs_failed:
	result = -ENOMEM;
-	ipoib_delete_debug_files(priv->dev);
	unregister_netdevice(priv->dev);
 
 register_failed:
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ad913cd4aded..330151d6ee42 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -957,15 +957,15 @@ static int metadata_commit(struct era_metadata *md)
		}
	}
 
-	r = save_sm_root(md);
+	r = dm_tm_pre_commit(md->tm);
	if (r) {
-		DMERR("%s: save_sm_root failed", __func__);
+		DMERR("%s: pre commit failed", __func__);
		return r;
	}
 
-	r = dm_tm_pre_commit(md->tm);
+	r = save_sm_root(md);
	if (r) {
-		DMERR("%s: pre commit failed", __func__);
+		DMERR("%s: save_sm_root failed", __func__);
		return r;
	}
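Editor's note: the dm-era hunk swaps two steps so the transaction manager's pre-commit flush runs before the space-map root is copied out; in the old order the saved root could go stale between the copy and the commit. A schematic of that ordering — the function names below are placeholders standing in for dm_tm_pre_commit() and save_sm_root():

#include <stdio.h>

static int pre_commit(void) { puts("flush in-core counts"); return 0; }
static int save_root(void)  { puts("snapshot root after flush"); return 0; }
static int commit(void)     { puts("write superblock"); return 0; }

static int metadata_commit(void)
{
	int r;

	r = pre_commit();	/* must run first: makes the root stable */
	if (r)
		return r;
	r = save_root();	/* now the snapshotted root is current */
	if (r)
		return r;
	return commit();
}

int main(void) { return metadata_commit(); }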
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 0b6fa15825aa..b194ccfd3a36 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2067,6 +2067,8 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+			continue;
 
		wbio->bi_rw = WRITE;
		wbio->bi_end_io = end_sync_write;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e3fbbbbd84e7..3dd1c19756ec 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2342,8 +2342,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
	spin_lock_bh(&pn->all_channels_lock);
	list_del(&pch->list);
	spin_unlock_bh(&pn->all_channels_lock);
-	put_net(pch->chan_net);
-	pch->chan_net = NULL;
 
	pch->file.dead = 1;
	wake_up_interruptible(&pch->file.rwait);
@@ -2960,6 +2958,9 @@ ppp_disconnect_channel(struct channel *pch)
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+	put_net(pch->chan_net);
+	pch->chan_net = NULL;
+
	atomic_dec(&channel_count);
 
	if (!pch->file.dead) {
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 614b4ca6420a..02e7b9e6a641 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -952,23 +952,12 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
 
 static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
 {
-	int ret;
-
	/* MBIM backwards compatible function? */
	if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
		return -ENODEV;
 
	/* The NCM data altsetting is fixed */
-	ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
-
-	/*
-	 * We should get an event when network connection is "connected" or
-	 * "disconnected". Set network connection in "disconnected" state
-	 * (carrier is OFF) during attach, so the IP network stack does not
-	 * start IPv6 negotiation and more.
-	 */
-	usbnet_link_change(dev, 0, 0);
-	return ret;
+	return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
 }
 
 static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1510,7 +1499,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
 
 static const struct driver_info cdc_ncm_info = {
	.description = "CDC NCM",
-	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+			| FLAG_LINK_INTR,
	.bind = cdc_ncm_bind,
	.unbind = cdc_ncm_unbind,
	.manage_power = usbnet_manage_power,
@@ -1523,7 +1513,7 @@ static const struct driver_info cdc_ncm_info = {
 static const struct driver_info wwan_info = {
	.description = "Mobile Broadband Network Device",
	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_WWAN,
+			| FLAG_LINK_INTR | FLAG_WWAN,
	.bind = cdc_ncm_bind,
	.unbind = cdc_ncm_unbind,
	.manage_power = usbnet_manage_power,
@@ -1536,7 +1526,7 @@ static const struct driver_info wwan_info = {
 static const struct driver_info wwan_noarp_info = {
	.description = "Mobile Broadband Network Device (NO ARP)",
	.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
-			| FLAG_WWAN | FLAG_NOARP,
+			| FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
	.bind = cdc_ncm_bind,
	.unbind = cdc_ncm_unbind,
	.manage_power = usbnet_manage_power,
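Editor's note: the ppp change defers put_net() from channel unregistration to final channel destruction, so the namespace reference lives exactly as long as the object that uses it. Reduced to a refcount sketch — names and the toy refcounting are invented for illustration:

#include <stdio.h>

struct net { int refs; };
struct channel { struct net *net; };

static void get_net(struct net *n) { n->refs++; }
static void put_net(struct net *n) { n->refs--; }

static void channel_create(struct channel *ch, struct net *n)
{
	get_net(n);		/* reference taken at creation */
	ch->net = n;
}

static void channel_unregister(struct channel *ch)
{
	/* no put_net() here: readers may still reach the channel */
	(void)ch;
}

static void channel_destroy(struct channel *ch)
{
	put_net(ch->net);	/* dropped only when the object really dies */
	ch->net = NULL;
}

int main(void)
{
	struct net n = { 1 };
	struct channel ch;

	channel_create(&ch, &n);
	channel_unregister(&ch);	/* net still pinned here */
	channel_destroy(&ch);
	printf("net refs: %d\n", n.refs);
	return 0;
}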
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index e50adf710229..71b30e18f2f0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -791,8 +791,14 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
		return k;	/* probably out of space --> ENOMEM */
	}
	if (atomic_read(&sdp->detaching)) {
-		if (srp->bio)
+		if (srp->bio) {
+			if (srp->rq->cmd != srp->rq->__cmd)
+				kfree(srp->rq->cmd);
+
+			blk_end_request_all(srp->rq, -EIO);
+			srp->rq = NULL;
+		}
+
		sg_finish_rem_req(srp);
		return -ENODEV;
	}
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 81fab2dfafa4..6d2fef1f1c9c 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -611,7 +611,7 @@ static void jr3_pci_poll_dev(unsigned long data)
		s = &dev->subdevices[i];
		spriv = s->private;
 
-		if (now > spriv->next_time_min) {
+		if (time_after_eq(now, spriv->next_time_min)) {
			struct jr3_pci_poll_delay sub_delay;
 
			sub_delay = jr3_pci_poll_subdevice(s);
@@ -729,11 +729,12 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
		s->insn_read = jr3_pci_ai_insn_read;
 
		spriv = jr3_pci_alloc_spriv(dev, s);
-		if (spriv) {
-			/* Channel specific range and maxdata */
-			s->range_table_list = spriv->range_table_list;
-			s->maxdata_list = spriv->maxdata_list;
-		}
+		if (!spriv)
+			return -ENOMEM;
+
+		/* Channel specific range and maxdata */
+		s->range_table_list = spriv->range_table_list;
+		s->maxdata_list = spriv->maxdata_list;
	}
 
	/* Reset DSP card */
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index fe410445a1af..40f979c18197 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -674,14 +674,13 @@ static int __init gdm_usb_mux_init(void)
 
 static void __exit gdm_usb_mux_exit(void)
 {
-	unregister_lte_tty_driver();
-
	if (mux_rx_wq) {
		flush_workqueue(mux_rx_wq);
		destroy_workqueue(mux_rx_wq);
	}
 
	usb_deregister(&gdm_mux_driver);
+	unregister_lte_tty_driver();
 }
 
 module_init(gdm_usb_mux_init);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 88bf518f23eb..776ce2d9752a 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -50,15 +50,25 @@ int vnt_control_out(struct vnt_private *priv, u8 request, u16 value,
		u16 index, u16 length, u8 *buffer)
 {
	int status = 0;
+	u8 *usb_buffer;
 
	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
		return STATUS_FAILURE;
 
	mutex_lock(&priv->usb_lock);
 
+	usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
+	if (!usb_buffer) {
+		mutex_unlock(&priv->usb_lock);
+		return -ENOMEM;
+	}
+
	status = usb_control_msg(priv->usb,
-		usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
-			index, buffer, length, USB_CTL_WAIT);
+				 usb_sndctrlpipe(priv->usb, 0),
+				 request, 0x40, value,
+				 index, usb_buffer, length, USB_CTL_WAIT);
+
+	kfree(usb_buffer);
 
	mutex_unlock(&priv->usb_lock);
 
@@ -78,15 +88,28 @@ int vnt_control_in(struct vnt_private *priv, u8 request, u16 value,
		u16 index, u16 length, u8 *buffer)
 {
	int status;
+	u8 *usb_buffer;
 
	if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
		return STATUS_FAILURE;
 
	mutex_lock(&priv->usb_lock);
 
+	usb_buffer = kmalloc(length, GFP_KERNEL);
+	if (!usb_buffer) {
+		mutex_unlock(&priv->usb_lock);
+		return -ENOMEM;
+	}
+
	status = usb_control_msg(priv->usb,
-		usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
-			index, buffer, length, USB_CTL_WAIT);
+				 usb_rcvctrlpipe(priv->usb, 0),
+				 request, 0xc0, value,
+				 index, usb_buffer, length, USB_CTL_WAIT);
+
+	if (status == length)
+		memcpy(buffer, usb_buffer, length);
+
+	kfree(usb_buffer);
 
	mutex_unlock(&priv->usb_lock);
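Editor's note: the vt6656 hunks stop handing caller memory (possibly on-stack) straight to usb_control_msg(); USB transfer buffers must be separately heap-allocated because the host controller may DMA-map them. A userspace sketch of the bounce-buffer pattern — the transfer function is a stand-in:

#include <stdlib.h>
#include <string.h>

/* Stand-in for usb_control_msg(): pretends to transfer len bytes. */
static int do_transfer(void *buf, size_t len) { (void)buf; return (int)len; }

static int control_out(const void *data, size_t len)
{
	int status;
	void *bounce = malloc(len);	/* kernel code uses kmemdup(..., GFP_KERNEL) */

	if (!bounce)
		return -1;
	memcpy(bounce, data, len);
	status = do_transfer(bounce, len);
	free(bounce);
	return status;
}

int main(void)
{
	char msg[4] = { 1, 2, 3, 4 };	/* stack buffer: never handed to hardware */
	return control_out(msg, sizeof(msg)) == 4 ? 0 : 1;
}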
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 88c9179ab20f..9233b653cc72 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -760,8 +760,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
 
-	if (ret)
-		target_complete_cmd(cmd, SAM_STAT_GOOD);
+	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
 }
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 18c30cabe27f..ef46bbb1086f 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1743,7 +1743,8 @@ static int serial_omap_probe(struct platform_device *pdev)
	return 0;
 
 err_add_port:
-	pm_runtime_put(&pdev->dev);
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
 err_rs485:
 err_port_line:
@@ -1756,9 +1757,13 @@ static int serial_omap_remove(struct platform_device *dev)
 {
	struct uart_omap_port *up = platform_get_drvdata(dev);
 
+	pm_runtime_get_sync(up->dev);
+
+	uart_remove_one_port(&serial_omap_reg, &up->port);
+
+	pm_runtime_dont_use_autosuspend(up->dev);
	pm_runtime_put_sync(up->dev);
	pm_runtime_disable(up->dev);
-	uart_remove_one_port(&serial_omap_reg, &up->port);
	pm_qos_remove_request(&up->pm_qos_request);
	device_init_wakeup(&dev->dev, false);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 3c6adc16b34f..31079f7e4e3b 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1757,6 +1757,9 @@ static int autosuspend_check(struct usb_device *udev)
	int			w, i;
	struct usb_interface	*intf;
 
+	if (udev->state == USB_STATE_NOTATTACHED)
+		return -ENODEV;
+
	/* Fail if autosuspend is disabled, or any interfaces are in use, or
	 * any interface drivers require remote wakeup but it isn't available.
	 */
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index ea337a718cc1..b3de806085f0 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -26,6 +26,7 @@
 #define MAX_USB_MINORS	256
 static const struct file_operations *usb_minors[MAX_USB_MINORS];
 static DECLARE_RWSEM(minor_rwsem);
+static DEFINE_MUTEX(init_usb_class_mutex);
 
 static int usb_open(struct inode *inode, struct file *file)
 {
@@ -108,8 +109,9 @@ static void release_usb_class(struct kref *kref)
 
 static void destroy_usb_class(void)
 {
-	if (usb_class)
-		kref_put(&usb_class->kref, release_usb_class);
+	mutex_lock(&init_usb_class_mutex);
+	kref_put(&usb_class->kref, release_usb_class);
+	mutex_unlock(&init_usb_class_mutex);
 }
 
 int usb_major_init(void)
@@ -171,7 +173,10 @@ int usb_register_dev(struct usb_interface *intf,
	if (intf->minor >= 0)
		return -EADDRINUSE;
 
+	mutex_lock(&init_usb_class_mutex);
	retval = init_usb_class();
+	mutex_unlock(&init_usb_class_mutex);
+
	if (retval)
		return retval;
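Editor's note: the usb/core/file.c change serializes init_usb_class() and destroy_usb_class() with a mutex so two concurrently probing drivers cannot race the refcounted class singleton. The shape of the fix, as a pthread sketch under those assumptions (names invented):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static struct state { int refs; } *singleton;

static int get_singleton(void)
{
	int ret = 0;

	pthread_mutex_lock(&init_lock);	/* both init and teardown take this */
	if (!singleton) {
		singleton = calloc(1, sizeof(*singleton));
		if (!singleton)
			ret = -1;
	}
	if (singleton)
		singleton->refs++;
	pthread_mutex_unlock(&init_lock);
	return ret;
}

static void put_singleton(void)
{
	pthread_mutex_lock(&init_lock);
	if (--singleton->refs == 0) {
		free(singleton);
		singleton = NULL;
	}
	pthread_mutex_unlock(&init_lock);
}

int main(void)
{
	if (get_singleton())
		return 1;
	put_singleton();
	return 0;
}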
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a3c136b03810..1e70437f8281 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2100,6 +2100,12 @@ void usb_disconnect(struct usb_device **pdev)
	dev_info(&udev->dev, "USB disconnect, device number %d\n",
			udev->devnum);
 
+	/*
+	 * Ensure that the pm runtime code knows that the USB device
+	 * is in the process of being disconnected.
+	 */
+	pm_runtime_barrier(&udev->dev);
+
	usb_lock_device(udev);
 
	hub_disconnect_children(udev);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 150cab3290f8..0fb672f385d8 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2428,7 +2428,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
-			"// Setting command ring address to 0x%x", val);
+			"// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 97cd9e24bd25..34b5d60b477a 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file)
	int subminor;
	int retval = 0;
	struct usb_interface *interface;
-	struct tower_reset_reply reset_reply;
+	struct tower_reset_reply *reset_reply;
	int result;
 
+	reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+	if (!reset_reply) {
+		retval = -ENOMEM;
+		goto exit;
+	}
+
	nonseekable_open(inode, file);
	subminor = iminor(inode);
 
@@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file)
				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
				  0,
				  0,
-				  &reset_reply,
-				  sizeof(reset_reply),
+				  reset_reply,
+				  sizeof(*reset_reply),
				  1000);
	if (result < 0) {
		dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@ unlock_exit:
	mutex_unlock(&dev->lock);
 
 exit:
+	kfree(reset_reply);
	return retval;
 }
 
@@ -808,7 +816,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
	struct lego_usb_tower *dev = NULL;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor* endpoint;
-	struct tower_get_version_reply get_version_reply;
+	struct tower_get_version_reply *get_version_reply = NULL;
	int i;
	int retval = -ENOMEM;
	int result;
@@ -916,6 +924,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
		 "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE), USB_MAJOR, dev->minor);
 
+	get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+	if (!get_version_reply) {
+		retval = -ENOMEM;
+		goto error;
+	}
+
	/* get the firmware version and log it */
	result = usb_control_msg (udev,
				  usb_rcvctrlpipe(udev, 0),
@@ -923,24 +938,26 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
				  0,
				  0,
-				  &get_version_reply,
-				  sizeof(get_version_reply),
+				  get_version_reply,
+				  sizeof(*get_version_reply),
				  1000);
	if (result < 0) {
		dev_err(idev, "LEGO USB Tower get version control request failed\n");
		retval = result;
		goto error;
	}
-	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
-		 "build %d\n", get_version_reply.major,
-		 get_version_reply.minor,
-		 le16_to_cpu(get_version_reply.build_no));
+	dev_info(&interface->dev,
+		 "LEGO USB Tower firmware version is %d.%d build %d\n",
+		 get_version_reply->major,
+		 get_version_reply->minor,
+		 le16_to_cpu(get_version_reply->build_no));
 
 exit:
	return retval;
 
 error:
+	kfree(get_version_reply);
	tower_delete (dev);
	return retval;
 }
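Editor's note: the xhci-mem.c one-liner fixes a format/argument mismatch — a u64 was printed with %x and the wrong variable. In portable C the same bug looks like this; PRIx64 from <inttypes.h> plays the role of the kernel's %llx:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t ring_addr = 0x00000001fffff000ULL;

	/* wrong: "%x" expects 32 bits and truncates the value */
	/* printf("ring at 0x%x\n", ring_addr); */

	/* right: a 64-bit conversion for a 64-bit value */
	printf("ring at 0x%016" PRIx64 "\n", ring_addr);
	return 0;
}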
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index c33ad2181b33..58b4c2828ee9 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -133,6 +133,7 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
		case USB_ENDPOINT_XFER_INT:
			if (dev->info->intr)
				goto try_intr;
+			continue;
		case USB_ENDPOINT_XFER_ISOC:
			if (dev->info->iso)
				goto try_iso;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index fdd78e3ad018..7ce28ef191e3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -873,6 +873,7 @@ static const struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
					USB_CLASS_VENDOR_SPEC,
					USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+	{ USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
	{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
	{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index db1a9b3a5f38..fbfa395dfecc 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -867,6 +867,12 @@
 #define FIC_VID			0x1457
 #define FIC_NEO1973_DEBUG_PID	0x5118
 
+/*
+ * Actel / Microsemi
+ */
+#define ACTEL_VID			0x1514
+#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID	0x2008
+
 /* Olimex */
 #define OLIMEX_VID			0x15BA
 #define OLIMEX_ARM_USB_OCD_PID	0x0003
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 543dcf972120..8e42e76213ed 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -86,12 +86,11 @@ void invalidate_bdev(struct block_device *bdev)
 {
	struct address_space *mapping = bdev->bd_inode->i_mapping;
 
-	if (mapping->nrpages == 0)
-		return;
-
-	invalidate_bh_lrus();
-	lru_add_drain_all();	/* make sure all lru add caches are flushed */
-	invalidate_mapping_pages(mapping, 0, -1);
+	if (mapping->nrpages) {
+		invalidate_bh_lrus();
+		lru_add_drain_all();	/* make sure all lru add caches are flushed */
+		invalidate_mapping_pages(mapping, 0, -1);
+	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, lets be cautious
	 */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 678b0d2bbbc4..f827d5537c28 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -369,6 +369,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
 
	if (update_xattr) {
		int err = 0;
+
		if (xattr && (flags & XATTR_CREATE))
			err = -EEXIST;
		else if (!xattr && (flags & XATTR_REPLACE))
@@ -376,12 +377,14 @@ static int __set_xattr(struct ceph_inode_info *ci,
		if (err) {
			kfree(name);
			kfree(val);
+			kfree(*newxattr);
			return err;
		}
		if (update_xattr < 0) {
			if (xattr)
				__remove_xattr(ci, xattr);
			kfree(name);
+			kfree(*newxattr);
			return 0;
		}
	}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index f7fb4b8658ba..4c7752639067 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -118,6 +118,9 @@ convert_sfm_char(const __u16 src_char, char *target)
	case SFM_COLON:
		*target = ':';
		break;
+	case SFM_DOUBLEQUOTE:
+		*target = '"';
+		break;
	case SFM_ASTERISK:
		*target = '*';
		break;
@@ -378,6 +381,9 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
	case ':':
		dest_char = cpu_to_le16(SFM_COLON);
		break;
+	case '"':
+		dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
+		break;
	case '*':
		dest_char = cpu_to_le16(SFM_ASTERISK);
		break;
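Editor's note: the usbtest hunk adds a continue so that an interrupt endpoint the test doesn't want no longer falls through into the isochronous case and gets misclassified. The hazard in miniature (a return stands in for the loop's continue):

#include <stdio.h>

enum xfer { XFER_INT, XFER_ISOC };

static const char *classify(enum xfer t, int want_intr)
{
	switch (t) {
	case XFER_INT:
		if (want_intr)
			return "intr";
		return "skipped";	/* without this, control falls into ISOC */
	case XFER_ISOC:
		return "iso";
	}
	return "none";
}

int main(void)
{
	printf("%s\n", classify(XFER_INT, 0));	/* "skipped", not "iso" */
	return 0;
}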
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 479bc0a941f3..07ade707fa60 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -57,6 +57,7 @@
  * not conflict (although almost does) with the mapping above.
  */
 
+#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
 #define SFM_ASTERISK    ((__u16) 0xF021)
 #define SFM_QUESTION    ((__u16) 0xF025)
 #define SFM_COLON       ((__u16) 0xF022)
@@ -64,8 +65,8 @@
 #define SFM_LESSTHAN    ((__u16) 0xF023)
 #define SFM_PIPE        ((__u16) 0xF027)
 #define SFM_SLASH       ((__u16) 0xF026)
-#define SFM_PERIOD      ((__u16) 0xF028)
-#define SFM_SPACE       ((__u16) 0xF029)
+#define SFM_SPACE       ((__u16) 0xF028)
+#define SFM_PERIOD      ((__u16) 0xF029)
 
 /*
  * Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 845148d017d2..8e1a17c64ddd 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -715,6 +715,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
	if (rc)
		return rc;
 
+	if (server->capabilities & CAP_UNICODE)
+		smb->hdr.Flags2 |= SMBFLG2_UNICODE;
+
	/* set up echo request */
	smb->hdr.Tid = 0xffff;
	smb->hdr.WordCount = 1;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1f3e6c74e49e..084337361d53 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -491,8 +491,12 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
	}
 
	if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
-		cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
-		return -EIO;
+		cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+			 rsplen);
+
+		/* relax check since Mac returns max bufsize allowed on ioctl */
+		if (rsplen > CIFSMaxBufSize)
+			return -EIO;
	}
 
	/* check validate negotiate info response matches what we got earlier */
@@ -1308,8 +1312,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
	 * than one credit. Windows typically sets this smaller, but for some
	 * ioctls it may be useful to allow server to send more. No point
	 * limiting what the server can send as long as fits in one credit
+	 * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
+	 * (by default, note that it can be overridden to make max larger)
+	 * in responses (except for read responses which can be bigger.
+	 * We may want to bump this limit up
	 */
-	req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
+	req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
 
	if (is_fsctl)
		req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
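Editor's note: the smb2pdu.c change turns an exact-size check into a bounded one — some servers (Macs, per the comment) legitimately return a larger validate-negotiate response, so only sizes beyond the transport cap are fatal. The validation shape, with invented constants:

#include <stdio.h>

#define EXPECTED_LEN 24U	/* illustrative, not the real struct size */
#define MAX_BUF      16384U	/* illustrative cap, like CIFSMaxBufSize */

static int validate_rsp_len(unsigned int rsplen)
{
	if (rsplen != EXPECTED_LEN) {
		fprintf(stderr, "unexpected response size: %u\n", rsplen);
		if (rsplen > MAX_BUF)
			return -1;	/* only truly oversized replies fail */
	}
	return 0;			/* tolerated: parse the known prefix */
}

int main(void)
{
	return validate_rsp_len(64);	/* odd but within the cap: accepted */
}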
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index d41843181818..e770c1ee4613 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
		return 0;
	}
 
+	err = handle->h_err;
	if (!handle->h_transaction) {
-		err = jbd2_journal_stop(handle);
-		return handle->h_err ? handle->h_err : err;
+		rc = jbd2_journal_stop(handle);
+		return err ? err : rc;
	}
 
	sb = handle->h_transaction->t_journal->j_private;
-	err = handle->h_err;
	rc = jbd2_journal_stop(handle);
 
	if (!err)
diff --git a/fs/xattr.c b/fs/xattr.c
index 64e83efb742d..2ac964e32daf 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -444,7 +444,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
		size = XATTR_SIZE_MAX;
	kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!kvalue) {
-		vvalue = vmalloc(size);
+		vvalue = vzalloc(size);
		if (!vvalue)
			return -ENOMEM;
		kvalue = vvalue;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 26c40faa8ea4..4886c0e97bbd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -907,6 +907,77 @@ static void put_ctx(struct perf_event_context *ctx)
 }
 
 /*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
+ * perf_pmu_migrate_context() we need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
+ * concerned with cpuctx and that doesn't have children.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ *   perf_remove_from_context();
+ *   synchronize_rcu();
+ *   perf_install_in_context();
+ *
+ * to affect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ *   task_struct::perf_event_mutex
+ *     perf_event_context::mutex
+ *       perf_event_context::lock
+ *       perf_event::child_mutex;
+ *       perf_event::mmap_mutex
+ *       mmap_sem
+ */
+static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+again:
+	rcu_read_lock();
+	ctx = ACCESS_ONCE(event->ctx);
+	if (!atomic_inc_not_zero(&ctx->refcount)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	mutex_lock(&ctx->mutex);
+	if (event->ctx != ctx) {
+		mutex_unlock(&ctx->mutex);
+		put_ctx(ctx);
+		goto again;
+	}
+
+	return ctx;
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+				  struct perf_event_context *ctx)
+{
+	mutex_unlock(&ctx->mutex);
+	put_ctx(ctx);
+}
+
+/*
  * This must be done under the ctx->lock, such as to serialize against
  * context_equiv(), therefore we cannot call put_ctx() since that might end up
  * calling scheduler related locks and ctx->lock nests inside those.
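Editor's note: perf_event_ctx_lock() above is the canonical "pin a reference, take the lock, then re-check the pointer" loop for objects whose owning context can be swapped while you sleep on the mutex. A compact pthread model of the same idea — simplified: plain C11 atomics instead of RCU, and unlike the kernel's atomic_inc_not_zero() this toy version does not guard against a refcount that already hit zero:

#include <pthread.h>
#include <stdatomic.h>

struct ctx {
	pthread_mutex_t mutex;
	atomic_int refcount;
};

struct event {
	struct ctx *_Atomic ctx;	/* may be swapped by a concurrent migration */
};

static void put_ctx(struct ctx *c)
{
	atomic_fetch_sub(&c->refcount, 1);
}

static struct ctx *event_ctx_lock(struct event *ev)
{
	struct ctx *c;

	for (;;) {
		c = atomic_load(&ev->ctx);
		atomic_fetch_add(&c->refcount, 1);	/* pin it */
		pthread_mutex_lock(&c->mutex);
		if (atomic_load(&ev->ctx) == c)
			return c;			/* still current: we won */
		pthread_mutex_unlock(&c->mutex);	/* moved under us: retry */
		put_ctx(c);
	}
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct event ev = { &c };
	struct ctx *locked = event_ctx_lock(&ev);

	pthread_mutex_unlock(&locked->mutex);
	put_ctx(locked);
	return 0;
}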
@@ -1654,7 +1725,7 @@ int __perf_event_disable(void *info)
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
 {
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
@@ -1695,6 +1766,19 @@ retry:
	}
	raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+	ctx = perf_event_ctx_lock(event);
+	_perf_event_disable(event);
+	perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
@@ -2158,7 +2242,7 @@ unlock:
  * perf_event_for_each_child or perf_event_for_each as described
  * for perf_event_disable.
  */
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
 {
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
@@ -2214,9 +2298,21 @@ retry:
 out:
	raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+	ctx = perf_event_ctx_lock(event);
+	_perf_event_enable(event);
+	perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
-int perf_event_refresh(struct perf_event *event, int refresh)
+static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
	/*
	 * not supported on inherited events
@@ -2225,10 +2321,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
		return -EINVAL;
 
	atomic_add(refresh, &event->event_limit);
-	perf_event_enable(event);
+	_perf_event_enable(event);
 
	return 0;
 }
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+	struct perf_event_context *ctx;
+	int ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = _perf_event_refresh(event, refresh);
+	perf_event_ctx_unlock(event, ctx);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(perf_event_refresh);
 
 static void ctx_sched_out(struct perf_event_context *ctx,
@@ -3421,7 +3532,16 @@ static void perf_remove_from_owner(struct perf_event *event)
	rcu_read_unlock();
 
	if (owner) {
-		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * If we're here through perf_event_exit_task() we're already
+		 * holding ctx->mutex which would be an inversion wrt. the
+		 * normal lock order.
+		 *
+		 * However we can safely take this lock because its the child
+		 * ctx->mutex.
+		 */
+		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
@@ -3547,12 +3667,13 @@ static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
 {
	struct perf_event *leader = event->group_leader, *sub;
-	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
-	u64 values[5];
+	int n = 0, size = 0, ret;
	u64 count, enabled, running;
+	u64 values[5];
+
+	lockdep_assert_held(&ctx->mutex);
 
-	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);
 
	values[n++] = 1 + leader->nr_siblings;
@@ -3567,7 +3688,7 @@ static int perf_event_read_group(struct perf_event *event,
	size = n * sizeof(u64);
 
	if (copy_to_user(buf, values, size))
-		goto unlock;
+		return -EFAULT;
 
	ret = size;
 
@@ -3581,14 +3702,11 @@ static int perf_event_read_group(struct perf_event *event,
		size = n * sizeof(u64);
 
		if (copy_to_user(buf + ret, values, size)) {
-			ret = -EFAULT;
-			goto unlock;
+			return -EFAULT;
		}
 
		ret += size;
	}
-unlock:
-	mutex_unlock(&ctx->mutex);
 
	return ret;
 }
@@ -3660,8 +3778,14 @@ static ssize_t
 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
	struct perf_event *event = file->private_data;
+	struct perf_event_context *ctx;
+	int ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = perf_read_hw(event, buf, count);
+	perf_event_ctx_unlock(event, ctx);
 
-	return perf_read_hw(event, buf, count);
+	return ret;
 }
 
 static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3687,7 +3811,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
	return events;
 }
 
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
 {
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
@@ -3706,6 +3830,7 @@ static void perf_event_for_each_child(struct perf_event *event,
	struct perf_event *child;
 
	WARN_ON_ONCE(event->ctx->parent_ctx);
+
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
@@ -3719,14 +3844,13 @@ static void perf_event_for_each(struct perf_event *event,
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;
 
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
+	lockdep_assert_held(&ctx->mutex);
+
	event = event->group_leader;
 
	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
-	mutex_unlock(&ctx->mutex);
 }
 
 struct period_event {
@@ -3831,25 +3955,24 @@ static int perf_event_set_output(struct perf_event *event,
				 struct perf_event *output_event);
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
-	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;
 
	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
-		func = perf_event_enable;
+		func = _perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
-		func = perf_event_disable;
+		func = _perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
-		func = perf_event_reset;
+		func = _perf_event_reset;
		break;
 
	case PERF_EVENT_IOC_REFRESH:
-		return perf_event_refresh(event, arg);
+		return _perf_event_refresh(event, arg);
 
	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);
@@ -3896,6 +4019,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	return 0;
 }
 
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct perf_event *event = file->private_data;
+	struct perf_event_context *ctx;
+	long ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = _perf_ioctl(event, cmd, arg);
+	perf_event_ctx_unlock(event, ctx);
+
+	return ret;
+}
+
 #ifdef CONFIG_COMPAT
 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
@@ -3918,11 +4054,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
 
 int perf_event_task_enable(void)
 {
+	struct perf_event_context *ctx;
	struct perf_event *event;
 
	mutex_lock(&current->perf_event_mutex);
-	list_for_each_entry(event, &current->perf_event_list, owner_entry)
-		perf_event_for_each_child(event, perf_event_enable);
+	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+		ctx = perf_event_ctx_lock(event);
+		perf_event_for_each_child(event, _perf_event_enable);
+		perf_event_ctx_unlock(event, ctx);
+	}
	mutex_unlock(&current->perf_event_mutex);
 
	return 0;
@@ -3930,11 +4070,15 @@ int perf_event_task_enable(void)
 
 int perf_event_task_disable(void)
 {
+	struct perf_event_context *ctx;
	struct perf_event *event;
 
	mutex_lock(&current->perf_event_mutex);
-	list_for_each_entry(event, &current->perf_event_list, owner_entry)
-		perf_event_for_each_child(event, perf_event_disable);
+	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+		ctx = perf_event_ctx_lock(event);
+		perf_event_for_each_child(event, _perf_event_disable);
+		perf_event_ctx_unlock(event, ctx);
+	}
	mutex_unlock(&current->perf_event_mutex);
 
	return 0;
@@ -5707,9 +5851,6 @@ struct swevent_htable {
 
	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];
-
-	/* Keeps track of cpu being initialized/exited */
-	bool				online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5967,14 +6108,8 @@ static int perf_swevent_add(struct perf_event *event, int flags)
	hwc->state = !(flags & PERF_EF_START);
 
	head = find_swevent_head(swhash, event);
-	if (!head) {
-		/*
-		 * We can race with cpu hotplug code. Do not
-		 * WARN if the cpu just got unplugged.
-		 */
-		WARN_ON_ONCE(swhash->online);
+	if (WARN_ON_ONCE(!head))
		return -EINVAL;
-	}
 
	hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6041,7 +6176,6 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
	int err = 0;
 
	mutex_lock(&swhash->hlist_mutex);
-
	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;
 
@@ -7271,6 +7405,15 @@ out:
	return ret;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -7286,7 +7429,7 @@ SYSCALL_DEFINE5(perf_event_open,
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *uninitialized_var(gctx);
	struct file *event_file = NULL;
	struct fd group = {NULL, 0};
	struct task_struct *task = NULL;
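Editor's note: mutex_lock_double() above avoids an AB/BA deadlock by always acquiring the lower-addressed mutex first, so two tasks locking the same pair with swapped arguments still agree on the order. A pthread rendition of the same trick:

#include <pthread.h>

/* Acquire two mutexes in a globally consistent (address) order. */
static void mutex_lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((void *)b < (void *)a) {
		pthread_mutex_t *t = a;
		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);	/* the kernel uses mutex_lock_nested() here */
}

int main(void)
{
	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t y = PTHREAD_MUTEX_INITIALIZER;

	mutex_lock_double(&x, &y);	/* same acquisition order as... */
	pthread_mutex_unlock(&x);
	pthread_mutex_unlock(&y);
	mutex_lock_double(&y, &x);	/* ...this call, despite swapped args */
	pthread_mutex_unlock(&x);
	pthread_mutex_unlock(&y);
	return 0;
}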
@@ -7484,9 +7627,14 @@ SYSCALL_DEFINE5(perf_event_open,
	}
 
	if (move_group) {
-		struct perf_event_context *gctx = group_leader->ctx;
+		gctx = group_leader->ctx;
+
+		/*
+		 * See perf_event_ctx_lock() for comments on the details
+		 * of swizzling perf_event::ctx.
+		 */
+		mutex_lock_double(&gctx->mutex, &ctx->mutex);
 
-		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader, false);
 
		/*
@@ -7501,15 +7649,19 @@ SYSCALL_DEFINE5(perf_event_open,
			perf_event__state_init(sibling);
			put_ctx(gctx);
		}
-		mutex_unlock(&gctx->mutex);
-		put_ctx(gctx);
+	} else {
+		mutex_lock(&ctx->mutex);
	}
 
	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
 
	if (move_group) {
+		/*
+		 * Wait for everybody to stop referencing the events through
+		 * the old lists, before installing it on new lists.
+		 */
		synchronize_rcu();
+
		perf_install_in_context(ctx, group_leader, group_leader->cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -7521,6 +7673,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
	perf_install_in_context(ctx, event, event->cpu);
	perf_unpin_context(ctx);
+
+	if (move_group) {
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+	}
	mutex_unlock(&ctx->mutex);
 
	put_online_cpus();
@@ -7628,7 +7785,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
 
-	mutex_lock(&src_ctx->mutex);
+	/*
+	 * See perf_event_ctx_lock() for comments on the details
+	 * of swizzling perf_event::ctx.
+	 */
+	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
@@ -7636,11 +7797,9 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
		put_ctx(src_ctx);
		list_add(&event->migrate_entry, &events);
	}
-	mutex_unlock(&src_ctx->mutex);
 
	synchronize_rcu();
 
-	mutex_lock(&dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
		list_del(&event->migrate_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
@@ -7650,6 +7809,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
+	mutex_unlock(&src_ctx->mutex);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
 
@@ -8172,7 +8332,6 @@ static void perf_event_init_cpu(int cpu)
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;
 
@@ -8225,14 +8384,7 @@ static void perf_event_exit_cpu_context(int cpu)
 
 static void perf_event_exit_cpu(int cpu)
 {
-	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
-
	perf_event_exit_cpu_context(cpu);
-
-	mutex_lock(&swhash->hlist_mutex);
-	swhash->online = false;
-	swevent_hlist_release(swhash);
-	mutex_unlock(&swhash->hlist_mutex);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/padata.c b/kernel/padata.c
index 1e3f9772dbd4..b36d255ea48d 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -357,7 +357,7 @@ static int padata_setup_cpumasks(struct parallel_data *pd,
	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
-		free_cpumask_var(pd->cpumask.cbcpu);
+		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}
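Editor's note: the padata one-liner is a classic copy-paste error-path bug — on failure to allocate the second cpumask, the code freed the (never allocated) second mask instead of the first. The corrected shape:

#include <stdlib.h>

struct masks { char *pcpu, *cbcpu; };

static int setup_masks(struct masks *m)
{
	m->pcpu = malloc(64);
	if (!m->pcpu)
		return -1;
	m->cbcpu = malloc(64);
	if (!m->cbcpu) {
		free(m->pcpu);	/* free what was allocated, not what failed */
		m->pcpu = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct masks m;

	if (setup_masks(&m))
		return 1;
	free(m.pcpu);
	free(m.cbcpu);
	return 0;
}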
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f4c71c5d38e..a760c9e0353e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2709,7 +2709,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
+	if (task_stack_end_corrupted(prev))
+		panic("corrupted stack end detected inside scheduler\n");
 #endif
	/*
	 * Test if we are atomic. Since do_exit() needs to call into
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1a000bb050f9..806c5b6b4b3a 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -69,7 +69,7 @@ next_tag:
 
	/* Extract a tag from the data */
	tag = data[dp++];
-	if (tag == 0) {
+	if (tag == ASN1_EOC) {
		/* It appears to be an EOC. */
		if (data[dp++] != 0)
			goto invalid_eoc;
@@ -91,10 +91,8 @@ next_tag:
 
	/* Extract the length */
	len = data[dp++];
-	if (len <= 0x7f) {
-		dp += len;
-		goto next_tag;
-	}
+	if (len <= 0x7f)
+		goto check_length;
 
	if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
		/* Indefinite length */
@@ -105,14 +103,18 @@ next_tag:
	}
 
	n = len - 0x80;
-	if (unlikely(n > sizeof(size_t) - 1))
+	if (unlikely(n > sizeof(len) - 1))
		goto length_too_long;
	if (unlikely(n > datalen - dp))
		goto data_overrun_error;
-	for (len = 0; n > 0; n--) {
+	len = 0;
+	for (; n > 0; n--) {
		len <<= 8;
		len |= data[dp++];
	}
+check_length:
+	if (len > datalen - dp)
+		goto data_overrun_error;
	dp += len;
	goto next_tag;
 
@@ -208,9 +210,8 @@ next_op:
		unsigned char tmp;
 
		/* Skip conditional matches if possible */
-		if ((op & ASN1_OP_MATCH__COND &&
-		     flags & FLAG_MATCHED) ||
-		    dp == datalen) {
+		if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+		    (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
			pc += asn1_op_lengths[op];
			goto next_op;
		}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 1fbc1aadb450..972b01eb43a4 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -909,7 +909,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;
 
-	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
+			       MSG_CMSG_COMPAT))
		return -EINVAL;
 
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
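Editor's note: the asn1_decoder changes make the tag-skip path honour the buffer — any decoded length is checked against the bytes that remain before the cursor advances. The essential check, extracted into a hypothetical short-form TLV walker:

#include <stddef.h>
#include <stdio.h>

/* Skip one length-prefixed element; return new offset or -1 on overrun. */
static long skip_element(const unsigned char *data, size_t datalen, size_t dp)
{
	size_t len;

	if (dp >= datalen)
		return -1;
	len = data[dp++];		/* short-form length only, for brevity */
	if (len > datalen - dp)		/* the added bound: never run off the end */
		return -1;
	return (long)(dp + len);
}

int main(void)
{
	unsigned char buf[] = { 3, 'a', 'b', 'c' };

	printf("%ld\n", skip_element(buf, sizeof(buf), 0));	/* 4 */
	buf[0] = 200;						/* lies about its size */
	printf("%ld\n", skip_element(buf, sizeof(buf), 0));	/* -1 */
	return 0;
}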
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 18361cbfc882..5fb8b7b579f9 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
	struct sock *sk = skb->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &transport->fl.u.ip6;
+	int res;
 
	pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb,
		 skb->len, &fl6->saddr, &fl6->daddr);
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 
	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
-	return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
+	rcu_read_lock();
+	res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+	rcu_read_unlock();
+	return res;
 }
 
 /* Returns the dst cache entry for the given source and destination ip
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
		pr_debug("src=%pI6 - ", &fl6->saddr);
	}
 
-	final_p = fl6_update_dst(fl6, np->opt, &final);
+	rcu_read_lock();
+	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
+	rcu_read_unlock();
+
	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (!asoc || saddr)
		goto out;
@@ -316,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
			}
		}
	}
-	rcu_read_unlock();
-
	if (baddr) {
		fl6->saddr = baddr->v6.sin6_addr;
		fl6->fl6_sport = baddr->v6.sin6_port;
-		final_p = fl6_update_dst(fl6, np->opt, &final);
+		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	}
+	rcu_read_unlock();
 
 out:
	if (!IS_ERR_OR_NULL(dst)) {
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 7950b4c26651..29b1f4dc48ca 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1722,7 +1722,12 @@ restart_locked:
			goto out_unlock;
	}
 
-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+	/* other == sk && unix_peer(other) != sk if
+	 * - unix_peer(sk) == NULL, destination address bound to sk
+	 * - unix_peer(sk) == sk by time of get but disconnected before lock
+	 */
+	if (other != sk &&
+	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index a0cda38205b9..77ec21420355 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
 static void queue_delete(struct snd_seq_queue *q)
 {
	/* stop and release the timer */
+	mutex_lock(&q->timer_mutex);
	snd_seq_timer_stop(q->timer);
	snd_seq_timer_close(q);
+	mutex_unlock(&q->timer_mutex);
	/* wait until access free */
	snd_use_lock_sync(&q->use_lock);
	/* release resources... */
diff --git a/sound/core/timer.c b/sound/core/timer.c
index c9da76e05b3f..d448437df4b5 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -77,7 +77,7 @@ struct snd_timer_user {
	struct timespec tstamp;		/* trigger tstamp */
	wait_queue_head_t qchange_sleep;
	struct fasync_struct *fasync;
-	struct mutex tread_sem;
+	struct mutex ioctl_lock;
 };
 
 /* list of timers */
@@ -1260,6 +1260,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
	tu->tstamp = *tstamp;
	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
		return;
+	memset(&r1, 0, sizeof(r1));
	r1.event = event;
	r1.tstamp = *tstamp;
	r1.val = resolution;
@@ -1294,6 +1295,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
	}
	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
	    tu->last_resolution != resolution) {
+		memset(&r1, 0, sizeof(r1));
		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
		r1.tstamp = tstamp;
		r1.val = resolution;
@@ -1342,7 +1344,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
		return -ENOMEM;
	spin_lock_init(&tu->qlock);
	init_waitqueue_head(&tu->qchange_sleep);
-	mutex_init(&tu->tread_sem);
+	mutex_init(&tu->ioctl_lock);
	tu->ticks = 1;
	tu->queue_size = 128;
	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
@@ -1362,8 +1364,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
	if (file->private_data) {
		tu = file->private_data;
		file->private_data = NULL;
+		mutex_lock(&tu->ioctl_lock);
		if (tu->timeri)
			snd_timer_close(tu->timeri);
+		mutex_unlock(&tu->ioctl_lock);
		kfree(tu->queue);
		kfree(tu->tqueue);
		kfree(tu);
@@ -1601,7 +1605,6 @@ static int snd_timer_user_tselect(struct file *file,
	int err = 0;
 
	tu = file->private_data;
-	mutex_lock(&tu->tread_sem);
	if (tu->timeri) {
		snd_timer_close(tu->timeri);
		tu->timeri = NULL;
	}
@@ -1645,7 +1648,6 @@ static int snd_timer_user_tselect(struct file *file,
	}
 
      __err:
-	mutex_unlock(&tu->tread_sem);
	return err;
 }
 
@@ -1759,6 +1761,7 @@ static int snd_timer_user_params(struct file *file,
	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
		if (tu->tread) {
			struct snd_timer_tread tread;
+			memset(&tread, 0, sizeof(tread));
			tread.event = SNDRV_TIMER_EVENT_EARLY;
			tread.tstamp.tv_sec = 0;
			tread.tstamp.tv_nsec = 0;
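Editor's note: the sound/core/timer.c memset() additions close a kernel-to-userspace info leak — struct padding and unwritten fields must be zeroed before the struct is copied out. In miniature (the struct here is invented; in the kernel the destination is copy_to_user()):

#include <string.h>

struct tread {
	int event;
	long long tstamp;	/* compiler may insert padding before this */
	unsigned int val;
};

/* Stand-in for building an event record destined for userspace. */
static void fill_tread(struct tread *r, int event, unsigned int val)
{
	memset(r, 0, sizeof(*r));	/* the fix: no stale stack bytes escape */
	r->event = event;
	r->val = val;
}

int main(void)
{
	struct tread r;

	fill_tread(&r, 1, 42);
	return r.event == 1 ? 0 : 1;
}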
@@ -1861,7 +1864,7 @@ enum {
	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
 };
 
-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
 {
	struct snd_timer_user *tu;
@@ -1878,17 +1881,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
	{
		int xarg;
 
-		mutex_lock(&tu->tread_sem);
-		if (tu->timeri) {	/* too late */
-			mutex_unlock(&tu->tread_sem);
+		if (tu->timeri)	/* too late */
			return -EBUSY;
-		}
-		if (get_user(xarg, p)) {
-			mutex_unlock(&tu->tread_sem);
+		if (get_user(xarg, p))
			return -EFAULT;
-		}
		tu->tread = xarg ? 1 : 0;
-		mutex_unlock(&tu->tread_sem);
		return 0;
	}
	case SNDRV_TIMER_IOCTL_GINFO:
@@ -1921,6 +1918,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
	return -ENOTTY;
 }
 
+static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	struct snd_timer_user *tu = file->private_data;
+	long ret;
+
+	mutex_lock(&tu->ioctl_lock);
+	ret = __snd_timer_user_ioctl(file, cmd, arg);
+	mutex_unlock(&tu->ioctl_lock);
+	return ret;
+}
+
 static int snd_timer_user_fasync(int fd, struct file * file, int on)
 {
	struct snd_timer_user *tu;