Patch "[PATCH v2 for-4.9 24/40] vfio/spapr: Reference mm in tce_container" has been added to the 4.9-stable tree

This is a note to let you know that I've just added the patch titled

    [PATCH v2 for-4.9 24/40] vfio/spapr: Reference mm in tce_container

to the 4.9-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     vfio-spapr-reference-mm-in-tce_container.patch
and it can be found in the queue-4.9 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Mon Mar 20 11:41:01 CET 2017
From: alexander.levin@xxxxxxxxxxx
Date: Fri, 17 Mar 2017 00:48:27 +0000
Subject: [PATCH v2 for-4.9 24/40] vfio/spapr: Reference mm in tce_container
To: "gregkh@xxxxxxxxxxxxxxxxxxx" <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: "stable@xxxxxxxxxxxxxxx" <stable@xxxxxxxxxxxxxxx>
Message-ID: <20170317004812.26960-24-alexander.levin@xxxxxxxxxxx>

From: Alexey Kardashevskiy <aik@xxxxxxxxx>

[ Upstream commit bc82d122ae4a0e9f971f13403995898fcfa0c09e ]

In some situations the userspace memory context may live longer than
the userspace process itself, so if we need to do proper memory context
cleanup, tce_container should take a reference to mm_struct and use it
later, when the process is gone (@current or @current->mm is NULL).
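
For context, a minimal, hedged sketch of the lifetime rule this relies on
(not part of the patch; the container and helper names are illustrative
only): pinning mm_count keeps the struct mm_struct itself valid after the
owning process has exited, and the pin is paired with mmdrop() at teardown.
On this 4.9 base the pin is an explicit atomic_inc() of mm_count; later
kernels spell it mmgrab().

    #include <linux/errno.h>
    #include <linux/sched.h>	/* current, struct mm_struct, mmdrop() */

    struct demo_container {
    	struct mm_struct *mm;	/* NULL until first real use */
    };

    static int demo_container_pin_mm(struct demo_container *c)
    {
    	if (!current->mm)
    		return -ESRCH;	/* no userspace mm to pin */

    	c->mm = current->mm;
    	/* keep the mm_struct allocated, not the mappings */
    	atomic_inc(&c->mm->mm_count);

    	return 0;
    }

    static void demo_container_release(struct demo_container *c)
    {
    	if (c->mm)
    		mmdrop(c->mm);	/* pairs with the mm_count increment above */
    }

Note the pin is on mm_count, not mm_users: it keeps the mm_struct
allocated but does not keep the address space mapped.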

This references mm and stores the pointer in the container; this is done
in a new helper - tce_iommu_mm_set() - when one of the following happens:
- a container is enabled (IOMMU v1);
- a first attempt to pre-register memory is made (IOMMU v2);
- a DMA window is created (IOMMU v2).
The @mm stays referenced till the container is destroyed.

This replaces current->mm with container->mm everywhere except debug
prints.

This adds a check that current->mm is the same as the one stored in
the container, to prevent userspace from making changes to the memory
context of another process.
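
The helper and the ioctl-path guard described in the paragraphs above,
condensed from the diff below (error paths and surrounding code trimmed):

    static long tce_iommu_mm_set(struct tce_container *container)
    {
    	if (container->mm) {
    		if (container->mm == current->mm)
    			return 0;	/* same mm may set it again */
    		return -EPERM;		/* refuse a different process's mm */
    	}
    	BUG_ON(!current->mm);
    	container->mm = current->mm;
    	atomic_inc(&container->mm->mm_count);

    	return 0;
    }

    /* and, early in tce_iommu_ioctl(), before any mm-touching command: */
    if (container->mm && container->mm != current->mm)
    	return -EPERM;

Because the mm is captured lazily, the container becomes bound to
whichever mm first enables it, pre-registers memory, or creates a DMA
window, as listed above.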

The DMA map/unmap ioctls do not check for @mm as they already check
for @enabled, which is set after tce_iommu_mm_set() is called.

This does not reference a task, as multiple threads within the same mm
are allowed to ioctl() to vfio; they are expected to have the same limits
and capabilities, and if they do not, we will simply fail with no harm done.

Signed-off-by: Alexey Kardashevskiy <aik@xxxxxxxxx>
Acked-by: Alex Williamson <alex.williamson@xxxxxxxxxx>
Reviewed-by: David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Signed-off-by: Sasha Levin <alexander.levin@xxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/vfio/vfio_iommu_spapr_tce.c |  160 ++++++++++++++++++++++--------------
 1 file changed, 100 insertions(+), 60 deletions(-)

--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -31,49 +31,49 @@
 static void tce_iommu_detach_group(void *iommu_data,
 		struct iommu_group *iommu_group);
 
-static long try_increment_locked_vm(long npages)
+static long try_increment_locked_vm(struct mm_struct *mm, long npages)
 {
 	long ret = 0, locked, lock_limit;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
+	if (WARN_ON_ONCE(!mm))
+		return -EPERM;
 
 	if (!npages)
 		return 0;
 
-	down_write(&current->mm->mmap_sem);
-	locked = current->mm->locked_vm + npages;
+	down_write(&mm->mmap_sem);
+	locked = mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 		ret = -ENOMEM;
 	else
-		current->mm->locked_vm += npages;
+		mm->locked_vm += npages;
 
 	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK),
 			ret ? " - exceeded" : "");
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 
 	return ret;
 }
 
-static void decrement_locked_vm(long npages)
+static void decrement_locked_vm(struct mm_struct *mm, long npages)
 {
-	if (!current || !current->mm || !npages)
-		return; /* process exited */
+	if (!mm || !npages)
+		return;
 
-	down_write(&current->mm->mmap_sem);
-	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
-		npages = current->mm->locked_vm;
-	current->mm->locked_vm -= npages;
+	down_write(&mm->mmap_sem);
+	if (WARN_ON_ONCE(npages > mm->locked_vm))
+		npages = mm->locked_vm;
+	mm->locked_vm -= npages;
 	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
 			npages << PAGE_SHIFT,
-			current->mm->locked_vm << PAGE_SHIFT,
+			mm->locked_vm << PAGE_SHIFT,
 			rlimit(RLIMIT_MEMLOCK));
-	up_write(&current->mm->mmap_sem);
+	up_write(&mm->mmap_sem);
 }
 
 /*
@@ -98,26 +98,38 @@ struct tce_container {
 	bool enabled;
 	bool v2;
 	unsigned long locked_pages;
+	struct mm_struct *mm;
 	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
 	struct list_head group_list;
 };
 
+static long tce_iommu_mm_set(struct tce_container *container)
+{
+	if (container->mm) {
+		if (container->mm == current->mm)
+			return 0;
+		return -EPERM;
+	}
+	BUG_ON(!current->mm);
+	container->mm = current->mm;
+	atomic_inc(&container->mm->mm_count);
+
+	return 0;
+}
+
 static long tce_iommu_unregister_pages(struct tce_container *container,
 		__u64 vaddr, __u64 size)
 {
 	struct mm_iommu_table_group_mem_t *mem;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
 		return -EINVAL;
 
-	mem = mm_iommu_find(current->mm, vaddr, size >> PAGE_SHIFT);
+	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
 	if (!mem)
 		return -ENOENT;
 
-	return mm_iommu_put(current->mm, mem);
+	return mm_iommu_put(container->mm, mem);
 }
 
 static long tce_iommu_register_pages(struct tce_container *container,
@@ -127,14 +139,11 @@ static long tce_iommu_register_pages(str
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	unsigned long entries = size >> PAGE_SHIFT;
 
-	if (!current || !current->mm)
-		return -ESRCH; /* process exited */
-
 	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
 			((vaddr + size) < vaddr))
 		return -EINVAL;
 
-	ret = mm_iommu_get(current->mm, vaddr, entries, &mem);
+	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
 	if (ret)
 		return ret;
 
@@ -143,7 +152,8 @@ static long tce_iommu_register_pages(str
 	return 0;
 }
 
-static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
+static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -152,13 +162,13 @@ static long tce_iommu_userspace_view_all
 
 	BUG_ON(tbl->it_userspace);
 
-	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	uas = vzalloc(cb);
 	if (!uas) {
-		decrement_locked_vm(cb >> PAGE_SHIFT);
+		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 		return -ENOMEM;
 	}
 	tbl->it_userspace = uas;
@@ -166,7 +176,8 @@ static long tce_iommu_userspace_view_all
 	return 0;
 }
 
-static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
+static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
+		struct mm_struct *mm)
 {
 	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
 			tbl->it_size, PAGE_SIZE);
@@ -176,7 +187,7 @@ static void tce_iommu_userspace_view_fre
 
 	vfree(tbl->it_userspace);
 	tbl->it_userspace = NULL;
-	decrement_locked_vm(cb >> PAGE_SHIFT);
+	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
 }
 
 static bool tce_page_is_contained(struct page *page, unsigned page_shift)
@@ -236,9 +247,6 @@ static int tce_iommu_enable(struct tce_c
 	struct iommu_table_group *table_group;
 	struct tce_iommu_group *tcegrp;
 
-	if (!current->mm)
-		return -ESRCH; /* process exited */
-
 	if (container->enabled)
 		return -EBUSY;
 
@@ -283,8 +291,12 @@ static int tce_iommu_enable(struct tce_c
 	if (!table_group->tce32_size)
 		return -EPERM;
 
+	ret = tce_iommu_mm_set(container);
+	if (ret)
+		return ret;
+
 	locked = table_group->tce32_size >> PAGE_SHIFT;
-	ret = try_increment_locked_vm(locked);
+	ret = try_increment_locked_vm(container->mm, locked);
 	if (ret)
 		return ret;
 
@@ -302,10 +314,8 @@ static void tce_iommu_disable(struct tce
 
 	container->enabled = false;
 
-	if (!current->mm)
-		return;
-
-	decrement_locked_vm(container->locked_pages);
+	BUG_ON(!container->mm);
+	decrement_locked_vm(container->mm, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -332,7 +342,8 @@ static void *tce_iommu_open(unsigned lon
 static int tce_iommu_clear(struct tce_container *container,
 		struct iommu_table *tbl,
 		unsigned long entry, unsigned long pages);
-static void tce_iommu_free_table(struct iommu_table *tbl);
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl);
 
 static void tce_iommu_release(void *iommu_data)
 {
@@ -357,10 +368,12 @@ static void tce_iommu_release(void *iomm
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_free_table(tbl);
+		tce_iommu_free_table(container, tbl);
 	}
 
 	tce_iommu_disable(container);
+	if (container->mm)
+		mmdrop(container->mm);
 	mutex_destroy(&container->lock);
 
 	kfree(container);
@@ -375,13 +388,14 @@ static void tce_iommu_unuse_page(struct
 	put_page(page);
 }
 
-static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
+static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
+		unsigned long tce, unsigned long size,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(current->mm, tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, size);
 	if (!mem)
 		return -EINVAL;
 
@@ -394,18 +408,18 @@ static int tce_iommu_prereg_ua_to_hpa(un
 	return 0;
 }
 
-static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
-		unsigned long entry)
+static void tce_iommu_unuse_page_v2(struct tce_container *container,
+		struct iommu_table *tbl, unsigned long entry)
 {
 	struct mm_iommu_table_group_mem_t *mem = NULL;
 	int ret;
 	unsigned long hpa = 0;
 	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
 
-	if (!pua || !current || !current->mm)
+	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -435,7 +449,7 @@ static int tce_iommu_clear(struct tce_co
 			continue;
 
 		if (container->v2) {
-			tce_iommu_unuse_page_v2(tbl, entry);
+			tce_iommu_unuse_page_v2(container, tbl, entry);
 			continue;
 		}
 
@@ -516,7 +530,7 @@ static long tce_iommu_build_v2(struct tc
 	enum dma_data_direction dirtmp;
 
 	if (!tbl->it_userspace) {
-		ret = tce_iommu_userspace_view_alloc(tbl);
+		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
 		if (ret)
 			return ret;
 	}
@@ -526,8 +540,8 @@ static long tce_iommu_build_v2(struct tc
 		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
 				entry + i);
 
-		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
-				&hpa, &mem);
+		ret = tce_iommu_prereg_ua_to_hpa(container,
+				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
 		if (ret)
 			break;
 
@@ -548,7 +562,7 @@ static long tce_iommu_build_v2(struct tc
 		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
 					__func__, entry << tbl->it_page_shift,
 					tce, ret);
@@ -556,7 +570,7 @@ static long tce_iommu_build_v2(struct tc
 		}
 
 		if (dirtmp != DMA_NONE)
-			tce_iommu_unuse_page_v2(tbl, entry + i);
+			tce_iommu_unuse_page_v2(container, tbl, entry + i);
 
 		*pua = tce;
 
@@ -584,7 +598,7 @@ static long tce_iommu_create_table(struc
 	if (!table_size)
 		return -EINVAL;
 
-	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
+	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
@@ -597,13 +611,14 @@ static long tce_iommu_create_table(struc
 	return ret;
 }
 
-static void tce_iommu_free_table(struct iommu_table *tbl)
+static void tce_iommu_free_table(struct tce_container *container,
+		struct iommu_table *tbl)
 {
 	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
-	tce_iommu_userspace_view_free(tbl);
+	tce_iommu_userspace_view_free(tbl, container->mm);
 	tbl->it_ops->free(tbl);
-	decrement_locked_vm(pages);
+	decrement_locked_vm(container->mm, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
@@ -666,7 +681,7 @@ unset_exit:
 		table_group = iommu_group_get_iommudata(tcegrp->grp);
 		table_group->ops->unset_window(table_group, num);
 	}
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 
 	return ret;
 }
@@ -704,7 +719,7 @@ static long tce_iommu_remove_window(stru
 
 	/* Free table */
 	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-	tce_iommu_free_table(tbl);
+	tce_iommu_free_table(container, tbl);
 	container->tables[num] = NULL;
 
 	return 0;
@@ -730,7 +745,17 @@ static long tce_iommu_ioctl(void *iommu_
 		}
 
 		return (ret < 0) ? 0 : ret;
+	}
+
+	/*
+	 * Sanity check to prevent one userspace from manipulating
+	 * another userspace mm.
+	 */
+	BUG_ON(!container);
+	if (container->mm && container->mm != current->mm)
+		return -EPERM;
 
+	switch (cmd) {
 	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
 		struct vfio_iommu_spapr_tce_info info;
 		struct tce_iommu_group *tcegrp;
@@ -891,6 +916,10 @@ static long tce_iommu_ioctl(void *iommu_
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (copy_from_user(&param, (void __user *)arg, minsz))
 			return -EFAULT;
 
@@ -914,6 +943,9 @@ static long tce_iommu_ioctl(void *iommu_
 		if (!container->v2)
 			break;
 
+		if (!container->mm)
+			return -EPERM;
+
 		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
 				size);
 
@@ -972,6 +1004,10 @@ static long tce_iommu_ioctl(void *iommu_
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1006,6 +1042,10 @@ static long tce_iommu_ioctl(void *iommu_
 		if (!container->v2)
 			break;
 
+		ret = tce_iommu_mm_set(container);
+		if (ret)
+			return ret;
+
 		if (!tce_groups_attached(container))
 			return -ENXIO;
 
@@ -1046,7 +1086,7 @@ static void tce_iommu_release_ownership(
 			continue;
 
 		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
-		tce_iommu_userspace_view_free(tbl);
+		tce_iommu_userspace_view_free(tbl, container->mm);
 		if (tbl->it_map)
 			iommu_release_ownership(tbl);
 


Patches currently in stable-queue which might be from gregkh@xxxxxxxxxxxxxxxxxxx are

queue-4.9/pci-add-comments-about-rom-bar-updating.patch
queue-4.9/acpi-blacklist-make-dell-latitude-3350-ethernet-work.patch
queue-4.9/s390-zcrypt-introduce-cex6-toleration.patch
queue-4.9/dccp-tcp-fix-routing-redirect-race.patch
queue-4.9/vrf-fix-use-after-free-in-vrf_xmit.patch
queue-4.9/tcp-fix-various-issues-for-sockets-morphing-to-listen-state.patch
queue-4.9/block-allow-write_same-commands-with-the-sg_io-ioctl.patch
queue-4.9/strparser-destroy-workqueue-on-module-exit.patch
queue-4.9/powerpc-mm-fix-build-break-when-cma-n-spapr_tce_iommu-y.patch
queue-4.9/vfio-spapr-postpone-default-window-creation.patch
queue-4.9/vfio-spapr-add-a-helper-to-create-default-dma-window.patch
queue-4.9/pci-do-any-vf-bar-updates-before-enabling-the-bars.patch
queue-4.9/usb-gadget-udc-atmel-remove-memory-leak.patch
queue-4.9/x86-hyperv-handle-unknown-nmis-on-one-cpu-when-unknown_nmi_panic.patch
queue-4.9/net-tunnel-set-inner-protocol-in-network-gro-hooks.patch
queue-4.9/serial-8250_pci-detach-low-level-driver-during-pci-error-recovery.patch
queue-4.9/powerpc-iommu-stop-using-current-in-mm_iommu_xxx.patch
queue-4.9/tun-fix-premature-pollout-notification-on-tun-devices.patch
queue-4.9/vxlan-correctly-validate-vxlan-id-against-vxlan_n_vid.patch
queue-4.9/bpf-fix-regression-on-verifier-pruning-wrt-map-lookups.patch
queue-4.9/tcp-dccp-block-bh-for-syn-processing.patch
queue-4.9/net-sched-act_skbmod-remove-unneeded-rcu_read_unlock-in-tcf_skbmod_dump.patch
queue-4.9/dccp-fix-memory-leak-during-tear-down-of-unsuccessful-connection-request.patch
queue-4.9/xen-do-not-re-use-pirq-number-cached-in-pci-device-msi-msg-data.patch
queue-4.9/vxlan-lock-rcu-on-tx-path.patch
queue-4.9/mlxsw-spectrum_router-avoid-potential-packets-loss.patch
queue-4.9/mpls-do-not-decrement-alive-counter-for-unregister-events.patch
queue-4.9/net-phy-avoid-deadlock-during-phy_error.patch
queue-4.9/uapi-fix-linux-packet_diag.h-userspace-compilation-error.patch
queue-4.9/pci-separate-vf-bar-updates-from-standard-bar-updates.patch
queue-4.9/pci-ignore-bar-updates-on-virtual-functions.patch
queue-4.9/geneve-lock-rcu-on-tx-path.patch
queue-4.9/dccp-fix-use-after-free-in-dccp_feat_activate_values.patch
queue-4.9/l2tp-avoid-use-after-free-caused-by-l2tp_ip_backlog_recv.patch
queue-4.9/powerpc-mm-iommu-vfio-spapr-put-pages-on-vfio-container-shutdown.patch
queue-4.9/bpf-fix-state-equivalence.patch
queue-4.9/scsi-ibmvscsis-clean-up-properly-if-target_submit_cmd-tmr-fails.patch
queue-4.9/drm-nouveau-disp-gp102-fix-cursor-overlay-immediate-channel-indices.patch
queue-4.9/pci-update-bars-using-property-bits-appropriate-for-type.patch
queue-4.9/scsi-ibmvscsis-synchronize-cmds-at-remove-time.patch
queue-4.9/vfio-spapr-postpone-allocation-of-userspace-version-of-tce-table.patch
queue-4.9/ibmveth-calculate-gso_segs-for-large-packets.patch
queue-4.9/net-mlx5e-do-not-reduce-lro-wqe-size-when-not-using-build_skb.patch
queue-4.9/net-sched-actions-decrement-module-reference-count-after-table-flush.patch
queue-4.9/pci-don-t-update-vf-bars-while-vf-memory-space-is-enabled.patch
queue-4.9/ipv4-mask-tos-for-input-route.patch
queue-4.9/net-fix-socket-refcounting-in-skb_complete_tx_timestamp.patch
queue-4.9/net-bridge-allow-ipv6-when-multicast-flood-is-disabled.patch
queue-4.9/net-mlx5e-fix-wrong-cqe-decompression.patch
queue-4.9/net-net_enable_timestamp-can-be-called-from-irq-contexts.patch
queue-4.9/igb-workaround-for-igb-i210-firmware-issue.patch
queue-4.9/drivers-hv-ring_buffer-count-on-wrap-around-mappings-in-get_next_pkt_raw-v2.patch
queue-4.9/drm-nouveau-disp-nv50-specify-ctrl-user-separately-when-constructing-classes.patch
queue-4.9/ipv6-make-ecmp-route-replacement-less-greedy.patch
queue-4.9/ipv6-avoid-write-to-a-possibly-cloned-skb.patch
queue-4.9/pci-remove-pci_resource_bar-and-pci_iov_resource_bar.patch
queue-4.9/mpls-send-route-delete-notifications-when-router-module-is-unloaded.patch
queue-4.9/dmaengine-iota-ioat_alloc_chan_resources-should-not-perform-sleeping-allocations.patch
queue-4.9/scsi-ibmvscsis-return-correct-partition-name-to-client.patch
queue-4.9/vti6-return-gre_key-for-vti6.patch
queue-4.9/vfio-spapr-reference-mm-in-tce_container.patch
queue-4.9/scsi-ibmvscsis-rearrange-functions-for-future-patches.patch
queue-4.9/dccp-unlock-sock-before-calling-sk_free.patch
queue-4.9/bpf-fix-mark_reg_unknown_value-for-spilled-regs-on-map-value-marking.patch
queue-4.9/powerpc-iommu-pass-mm_struct-to-init-cleanup-helpers.patch
queue-4.9/slub-move-synchronize_sched-out-of-slab_mutex-on-shrink.patch
queue-4.9/net-mlx5e-register-unregister-vport-representors-on-interface-attach-detach.patch
queue-4.9/pci-decouple-ioresource_rom_enable-and-pci_rom_address_enable.patch
queue-4.9/net-don-t-call-strlen-on-the-user-buffer-in-packet_bind_spkt.patch
queue-4.9/bpf-detect-identical-ptr_to_map_value_or_null-registers.patch
queue-4.9/scsi-ibmvscsis-issues-from-dan-carpenter-smatch.patch
queue-4.9/vxlan-don-t-allow-overwrite-of-config-src-addr.patch
queue-4.9/acpi-blacklist-add-_rev-quirks-for-dell-precision-5520-and-3520.patch
queue-4.9/bridge-drop-netfilter-fake-rtable-unconditionally.patch
queue-4.9/igb-add-i211-to-i210-phy-workaround.patch
queue-4.9/drm-nouveau-disp-nv50-split-chid-into-chid.ctrl-and-chid.user.patch
queue-4.9/net-fix-socket-refcounting-in-skb_complete_wifi_ack.patch
queue-4.9/scsi-ibmvscsis-synchronize-cmds-at-tpg_enable_store-time.patch
queue-4.9/ipv6-orphan-skbs-in-reassembly-unit.patch
queue-4.9/act_connmark-avoid-crashing-on-malformed-nlattrs-with-null-parms.patch
queue-4.9/uvcvideo-uvc_scan_fallback-for-webcams-with-broken-chain.patch


