Re: [PATCH RFC -rt] mm: perform lru_add_drain_all() remotely

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Thu, 28 Apr 2016 18:47:16 -0400
Rik van Riel <riel@xxxxxxxxxx> wrote:

> On Thu, 2016-04-28 at 16:31 -0400, Luiz Capitulino wrote:
> > lru_add_drain_all() works by scheduling lru_add_drain_cpu() to run
> > on all CPUs that have non-empty LRU pagevecs and then waiting for
> > the scheduled work to complete. However, workqueue threads may never
> > have the chance to run on a CPU that's running a SCHED_FIFO task.
> > This causes lru_add_drain_all() to block forever.
> > 
> > This commit solves this problem by changing lru_add_drain_all()
> > to drain the LRU pagevecs of remote CPUs. This is done by grabbing
> > swapvec_lock and calling lru_add_drain_cpu().
> > 
> > PS: This is based on an idea and initial implementation by
> >     Rik van Riel.  
> 
> I wrote maybe half the code in this patch.

And it was the hard part; sorry for not adding your signed-off-by.

> It should
> probably have my signed-off-by line too :)
> 
> Anyway, the patch looks fine to me and seems to work.
> 
>   Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
> > Signed-off-by: Luiz Capitulino <lcapitulino@xxxxxxxxxx>
> > ---
> >  include/linux/locallock.h | 27 +++++++++++++++++++++++++++
> >  mm/swap.c                 | 35 +++++++++++++++++++++++++----------
> >  2 files changed, 52 insertions(+), 10 deletions(-)
> > 
> > diff --git a/include/linux/locallock.h b/include/linux/locallock.h
> > index 6fe5928..2de478b 100644
> > --- a/include/linux/locallock.h
> > +++ b/include/linux/locallock.h
> > @@ -104,6 +104,17 @@ static inline void __local_unlock(struct local_irq_lock *lv)
> >  		put_local_var(lvar);				\
> >  	} while (0)
> >  
> > +#define local_lock_other_cpu(lvar, cpu)                         \
> > +	do {                                                    \
> > +		__local_lock(&per_cpu(lvar, cpu));              \
> > +	} while (0)
> > +
> > +#define local_unlock_other_cpu(lvar, cpu)                       \
> > +	do {                                                    \
> > +		__local_unlock(&per_cpu(lvar, cpu));            \
> > +	} while (0)
> > +
> > +
> >  static inline void __local_lock_irq(struct local_irq_lock *lv)
> >  {
> >  	spin_lock_irqsave(&lv->lock, lv->flags);
> > @@ -163,6 +174,22 @@ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
> >  		_flags = per_cpu(lvar, cpu).flags;			\
> >  	} while (0)
> >  
> > +#define local_lock_irqsave_other_cpu(lvar, _flags, cpu)			\
> > +	do {								\
> > +		if (cpu == smp_processor_id())				\
> > +			local_lock_irqsave(lvar, _flags);		\
> > +		else							\
> > +			local_lock_other_cpu(lvar, cpu);		\
> > +	} while (0)
> > +
> > +#define local_unlock_irqrestore_other_cpu(lvar, _flags, cpu)	        \
> > +	do {								\
> > +		if (cpu == smp_processor_id())				\
> > +			local_unlock_irqrestore(lvar, _flags);		\
> > +		else							\
> > +			local_unlock_other_cpu(lvar, cpu);		\
> > +	} while (0)
> > +
> >  static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
> >  					    unsigned long flags)
> >  {
> > diff --git a/mm/swap.c b/mm/swap.c
> > index ca194ae..9dc6956 100644
> > --- a/mm/swap.c
> > +++ b/mm/swap.c
> > @@ -821,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
> >  		unsigned long flags;
> >  
> >  		/* No harm done if a racing interrupt already did this */
> > -		local_lock_irqsave(rotate_lock, flags);
> > +		local_lock_irqsave_other_cpu(rotate_lock, flags, cpu);
> >  		pagevec_move_tail(pvec);
> > -		local_unlock_irqrestore(rotate_lock, flags);
> > +		local_unlock_irqrestore_other_cpu(rotate_lock, flags, cpu);
> >  	}
> >  
> >  	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
> > @@ -866,12 +866,32 @@ void lru_add_drain(void)
> >  	local_unlock_cpu(swapvec_lock);
> >  }
> >  
> > +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
> > +
> > +#ifdef CONFIG_PREEMPT_RT_BASE
> > +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
> > +{
> > +	local_lock_other_cpu(swapvec_lock, cpu);
> > +	lru_add_drain_cpu(cpu);
> > +	local_unlock_other_cpu(swapvec_lock, cpu);
> > +}
> > +#else
> >  static void lru_add_drain_per_cpu(struct work_struct *dummy)
> >  {
> >  	lru_add_drain();
> >  }
> >  
> > -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
> > +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
> > +{
> > +		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
> > +
> > +		INIT_WORK(work, lru_add_drain_per_cpu);
> > +		schedule_work_on(cpu, work);
> > +		cpumask_set_cpu(cpu, &has_work);
> > +
> > +}
> > +#endif
> > +
> >  
> >  void lru_add_drain_all(void)
> >  {
> > @@ -884,16 +904,11 @@ void lru_add_drain_all(void)
> >  	cpumask_clear(&has_work);
> >  
> >  	for_each_online_cpu(cpu) {
> > -		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
> > -
> >  		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
> >  		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
> >  		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
> > -		    need_activate_page_drain(cpu)) {
> > -			INIT_WORK(work, lru_add_drain_per_cpu);
> > -			schedule_work_on(cpu, work);
> > -			cpumask_set_cpu(cpu, &has_work);
> > -		}
> > +		    need_activate_page_drain(cpu))
> > +				remote_lru_add_drain(cpu, &has_work);
> >  	}
> >  
> >  	for_each_cpu(cpu, &has_work)  

--
To unsubscribe from this list: send the line "unsubscribe linux-rt-users" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html



[Index of Archives]     [RT Stable]     [Kernel Newbies]     [IDE]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux ATA RAID]     [Samba]     [Video 4 Linux]     [Device Mapper]

  Powered by Linux