Re: [PATCH v4 3/8] x86/mm: Refactor flush_tlb_mm_range() to merge local and remote cases

On Sun, 2017-05-28 at 10:00 -0700, Andy Lutomirski wrote:

> @@ -292,61 +303,33 @@ static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
>  void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
>  				unsigned long end, unsigned long vmflag)
>  {
> -	unsigned long addr;
> -	struct flush_tlb_info info;
> -	/* do a global flush by default */
> -	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
> -
> -	preempt_disable();
> +	int cpu;
>  
> -	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
> -		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
> -	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
> -		base_pages_to_flush = TLB_FLUSH_ALL;
> -
> -	if (current->active_mm != mm) {
> -		/* Synchronize with switch_mm. */
> -		smp_mb();
> -
> -		goto out;
> -	}
> -
> -	if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
> -		leave_mm(smp_processor_id());
> +	struct flush_tlb_info info = {
> +		.mm = mm,
> +	};
>  
> -		/* Synchronize with switch_mm. */
> -		smp_mb();
> +	cpu = get_cpu();
>  
> -		goto out;
> -	}
> +	/* Synchronize with switch_mm. */
> +	smp_mb();
>  
> -	/*
> -	 * Both branches below are implicit full barriers (MOV to CR or
> -	 * INVLPG) that synchronize with switch_mm.
> -	 */
> -	if (base_pages_to_flush == TLB_FLUSH_ALL) {
> -		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
> -		local_flush_tlb();
> +	/* Should we flush just the requested range? */
> +	if ((end != TLB_FLUSH_ALL) &&
> +	    !(vmflag & VM_HUGETLB) &&
> +	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
> +		info.start = start;
> +		info.end = end;
>  	} else {
> -		/* flush range by one by one 'invlpg' */
> -		for (addr = start; addr < end; addr += PAGE_SIZE) {
> -			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
> -			__flush_tlb_single(addr);
> -		}
> -	}
> -	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
> -out:
> -	info.mm = mm;
> -	if (base_pages_to_flush == TLB_FLUSH_ALL) {
>  		info.start = 0UL;
>  		info.end = TLB_FLUSH_ALL;
> -	} else {
> -		info.start = start;
> -		info.end = end;
>  	}
> -	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
> +
> +	if (mm == current->active_mm)
> +		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);

It looks like this could cause flush_tlb_func_local() to be
called over and over again while cpu_tlbstate.state is
TLBSTATE_LAZY, because leave_mm() does not change active_mm,
so the mm == current->active_mm test above stays true on a
lazy CPU.

Do you want to also check cpu_tlbstate.state here, and skip
the local flush when it is not TLBSTATE_OK, to ensure
flush_tlb_func_local() is only called when necessary?
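
Something along these lines, as a rough sketch (it reuses the
cpu_tlbstate.state / TLBSTATE_OK test from the code this patch
deletes; whether that read is still safe at this point in the
new flow is an assumption on my part):

	/*
	 * Sketch: only do the local flush when this CPU is
	 * actively using the mm.  A CPU that went lazy via
	 * leave_mm() still has active_mm == mm, but its
	 * cpu_tlbstate.state is TLBSTATE_LAZY, so it would be
	 * skipped here.
	 */
	if (mm == current->active_mm &&
	    this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);

That keeps the local fast path for the common TLBSTATE_OK case
and leaves lazy CPUs to be handled (or skipped) by the
flush_tlb_others() path below.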

> +	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
>  		flush_tlb_others(mm_cpumask(mm), &info);
> -	preempt_enable();
> +	put_cpu();
>  }
>  
>  
