Re: [PATCH 7/9] mm: swap: free each cluster individually in swap_entries_put_map_nr()

>  
>  fallback:
> -	for (i = 0; i < nr; i++) {
> -		if (data_race(si->swap_map[offset + i])) {
> -			count = swap_entry_put(si, swp_entry(type, offset + i));
> -			if (count == SWAP_HAS_CACHE)
> -				has_cache = true;
> -		} else {
> -			WARN_ON_ONCE(1);
> -		}
> +	ci = lock_cluster(si, offset);

Relocking the cluster at the fallback label is unnecessary overhead for the case
where we already hold the ci lock; instead of releasing the lock and jumping to
fallback, we could keep the lock held and jump to a label placed just after the
lock_cluster() call.

Maybe something like:

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9cd93a03b25c..41aa841b86d6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1504,8 +1504,7 @@ static bool swap_entries_put_map(struct swap_info_struct *si,
 
        ci = lock_cluster(si, offset);
        if (!swap_is_last_map(si, offset, nr, &has_cache)) {
-               unlock_cluster(ci);
-               goto fallback;
+               goto locked_fallback;
        }
        if (!has_cache)
                swap_entries_free(si, ci, entry, nr);
@@ -1518,6 +1517,7 @@ static bool swap_entries_put_map(struct swap_info_struct *si,
 
 fallback:
        ci = lock_cluster(si, offset);
+locked_fallback:
        for (i = 0; i < nr; i++, entry.val++) {
                count = swap_entry_put_locked(si, ci, entry, 1);
                if (count == SWAP_HAS_CACHE)
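
With that applied, the tail of swap_entries_put_map() would read roughly as
below. This is only a sketch pieced together from the two hunks above and the
quoted patch; the code elided between the hunks is omitted and marked as such:

	ci = lock_cluster(si, offset);
	if (!swap_is_last_map(si, offset, nr, &has_cache)) {
		/* already holding the ci lock, skip the relock */
		goto locked_fallback;
	}
	if (!has_cache)
		swap_entries_free(si, ci, entry, nr);
	/* ... rest of the fast path, unchanged ... */

fallback:
	ci = lock_cluster(si, offset);
locked_fallback:
	for (i = 0; i < nr; i++, entry.val++) {
		count = swap_entry_put_locked(si, ci, entry, 1);
		if (count == SWAP_HAS_CACHE)
			has_cache = true;
	}
	unlock_cluster(ci);
	return has_cache;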


Tim

> +	for (i = 0; i < nr; i++, entry.val++) {
> +		count = swap_entry_put_locked(si, ci, entry, 1);
> +		if (count == SWAP_HAS_CACHE)
> +			has_cache = true;
>  	}
> +	unlock_cluster(ci);
> +	return has_cache;
> +
> +}
> +





