[patch 2/3] ipc/sem: revert ipc/sem: Rework semaphore wakeups

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



ipc/sem: revert "ipc/sem: Rework semaphore wakeups"

Revert PeterZ's -rt wakeup scheme to prepare for its replacement by a new
completion scheme from Manfred Spraul.

<original changelog>
ipc/sem: Rework semaphore wakeups
Current sysv sems have a weird ass wakeup scheme that involves keeping
preemption disabled over a potential O(n^2) loop and busy waiting on
that on other CPUs.

Kill this and simply wake the task directly from under the sem_lock.

This was discovered by a migrate_disable() debug feature that
disallows:

  spin_lock();
  preempt_disable();
  spin_unlock()
  preempt_enable();
</original changelog>

Signed-off-by: 	Mike Galbraith <bitbucket@xxxxxxxxx>
---
 ipc/sem.c |   24 +++++++-----------------
 1 file changed, 7 insertions(+), 17 deletions(-)

--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -155,7 +155,7 @@ static int sysvipc_sem_proc_show(struct
  *	sem_array.sem_pending{,last},
  *	sem_array.sem_undo: sem_lock() for read/write
  *	sem_undo.proc_next: only "current" is allowed to read/write that field.
- *
+ *
  */
 
 #define sc_semmsl	sem_ctls[0]
@@ -518,7 +518,7 @@ static int try_atomic_semop (struct sem_
 		curr = sma->sem_base + sop->sem_num;
 		sem_op = sop->sem_op;
 		result = curr->semval;
-
+
 		if (!sem_op && result)
 			goto would_block;
 
@@ -545,7 +545,7 @@ static int try_atomic_semop (struct sem_
 			un->semadj[sop->sem_num] -= sop->sem_op;
 		sop--;
 	}
-
+
 	return 0;
 
 out_of_range:
@@ -577,13 +577,6 @@ static int try_atomic_semop (struct sem_
 static void wake_up_sem_queue_prepare(struct list_head *pt,
 				struct sem_queue *q, int error)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
-	struct task_struct *p = q->sleeper;
-	get_task_struct(p);
-	q->status = error;
-	wake_up_process(p);
-	put_task_struct(p);
-#else
 	if (list_empty(pt)) {
 		/*
 		 * Hold preempt off so that we don't get preempted and have the
@@ -595,7 +588,6 @@ static void wake_up_sem_queue_prepare(st
 	q->pid = error;
 
 	list_add_tail(&q->list, pt);
-#endif
 }
 
 /**
@@ -609,7 +601,6 @@ static void wake_up_sem_queue_prepare(st
  */
 static void wake_up_sem_queue_do(struct list_head *pt)
 {
-#ifndef CONFIG_PREEMPT_RT_BASE
 	struct sem_queue *q, *t;
 	int did_something;
 
@@ -622,7 +613,6 @@ static void wake_up_sem_queue_do(struct
 	}
 	if (did_something)
 		preempt_enable();
-#endif
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
@@ -977,7 +967,7 @@ static int semctl_nolock(struct ipc_name
 		err = security_sem_semctl(NULL, cmd);
 		if (err)
 			return err;
-
+
 		memset(&seminfo,0,sizeof(seminfo));
 		seminfo.semmni = ns->sc_semmni;
 		seminfo.semmns = ns->sc_semmns;
@@ -997,7 +987,7 @@ static int semctl_nolock(struct ipc_name
 		}
 		max_id = ipc_get_maxid(&sem_ids(ns));
 		up_read(&sem_ids(ns).rw_mutex);
-		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
+		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
 	}
@@ -1672,7 +1662,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
 	/* We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
 	 */
-
+
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
@@ -1795,7 +1785,7 @@ int copy_semundo(unsigned long clone_fla
 			return error;
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
-	} else
+	} else
 		tsk->sysvsem.undo_list = NULL;
 
 	return 0;



--
To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [RT Stable]     [Kernel Newbies]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Samba]     [Video 4 Linux]     [Device Mapper]

  Powered by Linux