+ lock-validator-special-locking-slab.patch added to -mm tree

The patch titled

     lock validator: special locking: slab

has been added to the -mm tree.  Its filename is

     lock-validator-special-locking-slab.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this.

------------------------------------------------------
Subject: lock validator: special locking: slab
From: Ingo Molnar <mingo@xxxxxxx>


Teach the lock validator about the slab allocator's special (recursive)
locking.  This has no effect on non-lockdep kernels.

Also fix code that assumed spinlocks can be initialized via memcpy().
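
For reference, the lockdep pattern the slab hunks rely on: when a code path
may legitimately hold two locks of the same lock class at once, the inner
acquisition is taken with spin_lock_nested() and a subclass such as
SINGLE_DEPTH_NESTING, so the validator records a distinct nesting level
instead of reporting a self-deadlock.  A minimal sketch of the pattern (the
two-lock setup here is hypothetical, not taken from the patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(a);	/* two locks of the same class */
	static DEFINE_SPINLOCK(b);

	static void nested_example(void)
	{
		spin_lock(&a);
		/* annotate the inner lock as one nesting level deep: */
		spin_lock_nested(&b, SINGLE_DEPTH_NESTING);

		/* ... critical section ... */

		spin_unlock(&b);
		spin_unlock(&a);
	}

On non-lockdep kernels spin_lock_nested() falls back to a plain
spin_lock(), which is why the annotation has no effect there.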

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Arjan van de Ven <arjan@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 mm/slab.c |   59 ++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 48 insertions(+), 11 deletions(-)

diff -puN mm/slab.c~lock-validator-special-locking-slab mm/slab.c
--- devel/mm/slab.c~lock-validator-special-locking-slab	2006-05-29 18:13:19.000000000 -0700
+++ devel-akpm/mm/slab.c	2006-05-29 18:13:19.000000000 -0700
@@ -1023,7 +1023,8 @@ static void drain_alien_cache(struct kme
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+				   int nesting)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1041,7 +1042,7 @@ static inline int cache_free_alien(struc
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
-		spin_lock(&alien->lock);
+		spin_lock_nested(&alien->lock, nesting);
 		if (unlikely(alien->avail == alien->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
 			__drain_alien_cache(cachep, alien, nodeid);
@@ -1070,7 +1071,8 @@ static inline void free_alien_cache(stru
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+				   int nesting)
 {
 	return 0;
 }
@@ -1275,6 +1277,11 @@ static void init_list(struct kmem_cache 
 
 	local_irq_disable();
 	memcpy(ptr, list, sizeof(struct kmem_list3));
+	/*
+	 * Do not assume that spinlocks can be initialized via memcpy:
+	 */
+	spin_lock_init(&ptr->list_lock);
+
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->nodelists[nodeid] = ptr;
 	local_irq_enable();
@@ -1405,7 +1412,7 @@ void __init kmem_cache_init(void)
 	}
 	/* 4) Replace the bootstrap head arrays */
 	{
-		void *ptr;
+		struct array_cache *ptr;
 
 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
@@ -1413,6 +1420,11 @@ void __init kmem_cache_init(void)
 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
 		memcpy(ptr, cpu_cache_get(&cache_cache),
 		       sizeof(struct arraycache_init));
+		/*
+		 * Do not assume that spinlocks can be initialized via memcpy:
+		 */
+		spin_lock_init(&ptr->lock);
+
 		cache_cache.array[smp_processor_id()] = ptr;
 		local_irq_enable();
 
@@ -1423,6 +1435,11 @@ void __init kmem_cache_init(void)
 		       != &initarray_generic.cache);
 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
 		       sizeof(struct arraycache_init));
+		/*
+		 * Do not assume that spinlocks can be initialized via memcpy:
+		 */
+		spin_lock_init(&ptr->lock);
+
 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
 		    ptr;
 		local_irq_enable();
@@ -1750,6 +1767,8 @@ static void slab_destroy_objs(struct kme
 }
 #endif
 
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
+
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1773,8 +1792,17 @@ static void slab_destroy(struct kmem_cac
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep))
-			kmem_cache_free(cachep->slabp_cache, slabp);
+		if (OFF_SLAB(cachep)) {
+			unsigned long flags;
+
+			/*
+			 * lockdep: we may nest inside an already held
+			 * ac->lock, so pass in a nesting flag:
+			 */
+			local_irq_save(flags);
+			__cache_free(cachep->slabp_cache, slabp, 1);
+			local_irq_restore(flags);
+		}
 	}
 }
 
@@ -3050,7 +3078,16 @@ static void free_block(struct kmem_cache
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
+				/*
+				 * It is safe to drop the lock. The slab is
+				 * no longer linked to the cache. cachep
+				 * cannot disappear - we are using it and
+				 * all destruction of caches must be
+				 * serialized properly by the user.
+				 */
+				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
+				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3076,7 +3113,7 @@ static void cache_flusharray(struct kmem
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock(&l3->list_lock);
+	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3119,14 +3156,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released.  Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp))
+	if (cache_free_alien(cachep, objp, nesting))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3381,7 +3418,7 @@ void kmem_cache_free(struct kmem_cache *
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp);
+	__cache_free(cachep, objp, 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3406,7 +3443,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp);
+	__cache_free(c, (void *)objp, 0);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
_
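
Two recurring patterns in the hunks above deserve a note.  free_block() now
drops l3->list_lock around slab_destroy(), because destroying an OFF_SLAB
slab frees the slab descriptor back into another cache and thus takes
another lock of the same class; together with the nesting annotations this
keeps the lock order visible to lockdep.  And the memcpy hunks all encode
one rule: a spinlock embedded in a structure is not valid after the
structure is copied, since lockdep and the debug-spinlock code track
per-instance state.  A hedged sketch of that rule (struct and function
names are hypothetical):

	#include <linux/spinlock.h>
	#include <linux/string.h>

	struct foo {
		spinlock_t lock;
		int data;
	};

	static void foo_copy(struct foo *dst, const struct foo *src)
	{
		memcpy(dst, src, sizeof(*dst));
		/*
		 * Do not assume that spinlocks can be initialized via
		 * memcpy: re-initialize the embedded lock explicitly.
		 */
		spin_lock_init(&dst->lock);
	}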

Patches currently in -mm which might be from mingo@xxxxxxx are

hrtimer-export-symbols.patch
x86_64-fix-stack-mmap-randomization-for-compat.patch
git-acpi.patch
lock-validator-sound-oss-emu10k1-midic-cleanup.patch
fix-drivers-mfd-ucb1x00-corec-irq-probing-bug.patch
git-infiniband.patch
git-netdev-all.patch
fix-for-serial-uart-lockup.patch
lock-validator-lockdep-small-xfs-init_rwsem-cleanup.patch
swapless-pm-add-r-w-migration-entries.patch
i386-break-out-of-recursion-in-stackframe-walk.patch
x86-re-enable-generic-numa.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-tidy.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-arch_vma_name-fix.patch
work-around-ppc64-bootup-bug-by-making-mutex-debugging-save-restore-irqs.patch
kernel-kernel-cpuc-to-mutexes.patch
cond-resched-might-sleep-fix.patch
define-__raw_get_cpu_var-and-use-it.patch
ide-cd-end-of-media-error-fix.patch
spin-rwlock-init-cleanups.patch
lock-validator-introduce-warn_on_oncecond.patch
time-clocksource-infrastructure.patch
sched-comment-bitmap-size-accounting.patch
sched-fix-interactive-ceiling-code.patch
sched-implement-smpnice.patch
sched-protect-calculation-of-max_pull-from-integer-wrap.patch
sched-store-weighted-load-on-up.patch
sched-add-discrete-weighted-cpu-load-function.patch
sched-prevent-high-load-weight-tasks-suppressing-balancing.patch
sched-improve-stability-of-smpnice-load-balancing.patch
sched-improve-smpnice-load-balancing-when-load-per-task.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing-fix.patch
sched-modify-move_tasks-to-improve-load-balancing-outcomes.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks-fix-2.patch
sched_domain-handle-kmalloc-failure.patch
sched_domain-handle-kmalloc-failure-fix.patch
sched_domain-dont-use-gfp_atomic.patch
sched_domain-use-kmalloc_node.patch
sched_domain-allocate-sched_group-structures-dynamically.patch
sched-add-above-background-load-function.patch
mm-implement-swap-prefetching-fix.patch
pi-futex-futex-code-cleanups.patch
pi-futex-robust-futex-docs-fix.patch
pi-futex-introduce-debug_check_no_locks_freed.patch
pi-futex-introduce-warn_on_smp.patch
pi-futex-add-plist-implementation.patch
pi-futex-scheduler-support-for-pi.patch
pi-futex-rt-mutex-core.patch
pi-futex-rt-mutex-docs.patch
pi-futex-rt-mutex-docs-update.patch
pi-futex-rt-mutex-debug.patch
pi-futex-rt-mutex-tester.patch
pi-futex-rt-mutex-futex-api.patch
pi-futex-futex_lock_pi-futex_unlock_pi-support.patch
futex_requeue-optimization.patch
genirq-rename-desc-handler-to-desc-chip.patch
genirq-rename-desc-handler-to-desc-chip-power-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix-2.patch
genirq-sem2mutex-probe_sem-probing_active.patch
genirq-cleanup-merge-irq_affinity-into-irq_desc.patch
genirq-cleanup-remove-irq_descp.patch
genirq-cleanup-remove-fastcall.patch
genirq-cleanup-misc-code-cleanups.patch
genirq-cleanup-reduce-irq_desc_t-use-mark-it-obsolete.patch
genirq-cleanup-include-linux-irqh.patch
genirq-cleanup-merge-irq_dir-smp_affinity_entry-into-irq_desc.patch
genirq-cleanup-merge-pending_irq_cpumask-into-irq_desc.patch
genirq-cleanup-turn-arch_has_irq_per_cpu-into-config_irq_per_cpu.patch
genirq-debug-better-debug-printout-in-enable_irq.patch
genirq-add-retrigger-irq-op-to-consolidate-hw_irq_resend.patch
genirq-doc-comment-include-linux-irqh-structures.patch
genirq-doc-handle_irq_event-and-__do_irq-comments.patch
genirq-cleanup-no_irq_type-cleanups.patch
genirq-doc-add-design-documentation.patch
genirq-add-genirq-sw-irq-retrigger.patch
genirq-add-irq_noprobe-support.patch
genirq-add-irq_norequest-support.patch
genirq-add-irq_noautoen-support.patch
genirq-update-copyrights.patch
genirq-core.patch
genirq-add-irq-chip-support.patch
genirq-add-handle_bad_irq.patch
genirq-add-irq-wake-power-management-support.patch
genirq-add-sa_trigger-support.patch
genirq-cleanup-no_irq_type-no_irq_chip-rename.patch
genirq-convert-the-x86_64-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips-fix-2.patch
genirq-more-verbose-debugging-on-unexpected-irq-vectors.patch
lock-validator-floppyc-irq-release-fix.patch
lock-validator-forcedethc-fix.patch
lock-validator-mutex-section-binutils-workaround.patch
lock-validator-add-__module_address-method.patch
lock-validator-better-lock-debugging.patch
lock-validator-locking-api-self-tests.patch
lock-validator-locking-init-debugging-improvement.patch
lock-validator-beautify-x86_64-stacktraces.patch
lock-validator-x86_64-document-stack-frame-internals.patch
lock-validator-stacktrace.patch
lock-validator-fown-locking-workaround.patch
lock-validator-sk_callback_lock-workaround.patch
lock-validator-irqtrace-core.patch
lock-validator-irqtrace-cleanup-include-asm-i386-irqflagsh.patch
lock-validator-irqtrace-cleanup-include-asm-x86_64-irqflagsh.patch
lock-validator-lockdep-add-local_irq_enable_in_hardirq-api.patch
lock-validator-add-per_cpu_offset.patch
lock-validator-add-per_cpu_offset-fix.patch
lock-validator-core.patch
lock-validator-procfs.patch
lock-validator-design-docs.patch
lock-validator-prove-rwsem-locking-correctness.patch
lock-validator-prove-spinlock-rwlock-locking-correctness.patch
lock-validator-prove-mutex-locking-correctness.patch
lock-validator-print-all-lock-types-on-sysrq-d.patch
lock-validator-x86_64-early-init.patch
lock-validator-smp-alternatives-workaround.patch
lock-validator-do-not-recurse-in-printk.patch
lock-validator-disable-nmi-watchdog-if-config_lockdep.patch
lock-validator-special-locking-bdev.patch
lock-validator-special-locking-direct-io.patch
lock-validator-special-locking-serial.patch
lock-validator-special-locking-dcache.patch
lock-validator-special-locking-i_mutex.patch
lock-validator-special-locking-s_lock.patch
lock-validator-special-locking-futex.patch
lock-validator-special-locking-genirq.patch
lock-validator-special-locking-completions.patch
lock-validator-special-locking-waitqueues.patch
lock-validator-special-locking-mm.patch
lock-validator-special-locking-slab.patch
lock-validator-special-locking-skb_queue_head_init.patch
lock-validator-special-locking-timerc.patch
lock-validator-special-locking-schedc.patch
lock-validator-special-locking-hrtimerc.patch
lock-validator-special-locking-sock_lock_init.patch
lock-validator-special-locking-af_unix.patch
lock-validator-special-locking-bh_lock_sock.patch
lock-validator-special-locking-mmap_sem.patch
lock-validator-special-locking-sb-s_umount.patch
lock-validator-special-locking-sb-s_umount-fix.patch
lock-validator-special-locking-jbd.patch
lock-validator-special-locking-posix-timers.patch
lock-validator-special-locking-sch_genericc.patch
lock-validator-special-locking-xfrm.patch
lock-validator-special-locking-sound-core-seq-seq_portsc.patch
lock-validator-enable-lock-validator-in-kconfig.patch
lock-validator-enable-lock-validator-in-kconfig-x86-only.patch
lock-validator-special-locking-kgdb.patch
detect-atomic-counter-underflows.patch
debug-shared-irqs.patch
make-frame_pointer-default=y.patch
mutex-subsystem-synchro-test-module.patch
vdso-print-fatal-signals.patch
vdso-improve-print_fatal_signals-support-by-adding-memory-maps.patch
