linux-next: manual merge of the btrfs tree with Linus' tree

Hi all,

Today's linux-next merge of the btrfs tree got a conflict in:

  fs/btrfs/extent_io.c

between commit:

  600f111ef51d ("fs: Rename mapping private members")

from Linus' tree and commits:

  08236d11031b ("btrfs: cache folio size and shift in extent_buffer")
  13df3775efca ("btrfs: cleanup metadata page pointer usage")
  082d5bb9b336 ("btrfs: migrate extent_buffer::pages[] to folio")
  09e6cef19c9f ("btrfs: refactor alloc_extent_buffer() to allocate-then-attach method")
  cfbf07e2787e ("btrfs: migrate to use folio private instead of page private")

from the btrfs tree.
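
The conflict is essentially mechanical: the commit from Linus' tree adds
an "i_" prefix to the private members of struct address_space (so
mapping->private_lock becomes mapping->i_private_lock), while the btrfs
commits convert the same call sites from struct page helpers to their
folio equivalents (PagePrivate() -> folio_test_private(), page->private
-> folio_get_private(), and so on).  As a rough sketch of the two sides,
using one line that appears in the hunks below:

	/* Linus' tree: member renamed, still page based */
	lockdep_assert_held(&page->mapping->i_private_lock);

	/* btrfs tree: folio based, old member name */
	lockdep_assert_held(&folio->mapping->private_lock);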

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.
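
In each conflicting hunk the resolution below keeps the folio conversion
from the btrfs tree and applies the member rename from Linus' tree on
top of it, so the line sketched above ends up as:

	lockdep_assert_held(&folio->mapping->i_private_lock);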

-- 
Cheers,
Stephen Rothwell

diff --cc fs/btrfs/extent_io.c
index b6ff6f320198,c8aabe3be169..000000000000
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@@ -874,14 -901,14 +901,14 @@@ static int attach_extent_buffer_folio(s
  	 * For cloned or dummy extent buffers, their pages are not mapped and
  	 * will not race with any other ebs.
  	 */
- 	if (page->mapping)
- 		lockdep_assert_held(&page->mapping->i_private_lock);
+ 	if (folio->mapping)
 -		lockdep_assert_held(&folio->mapping->private_lock);
++		lockdep_assert_held(&folio->mapping->i_private_lock);
  
  	if (fs_info->nodesize >= PAGE_SIZE) {
- 		if (!PagePrivate(page))
- 			attach_page_private(page, eb);
+ 		if (!folio_test_private(folio))
+ 			folio_attach_private(folio, eb);
  		else
- 			WARN_ON(page->private != (unsigned long)eb);
+ 			WARN_ON(folio_get_private(folio) != eb);
  		return 0;
  	}
  
@@@ -1741,9 -1775,9 +1775,9 @@@ static int submit_eb_subpage(struct pag
  		 * Take private lock to ensure the subpage won't be detached
  		 * in the meantime.
  		 */
 -		spin_lock(&page->mapping->private_lock);
 +		spin_lock(&page->mapping->i_private_lock);
- 		if (!PagePrivate(page)) {
+ 		if (!folio_test_private(folio)) {
 -			spin_unlock(&page->mapping->private_lock);
 +			spin_unlock(&page->mapping->i_private_lock);
  			break;
  		}
  		spin_lock_irqsave(&subpage->lock, flags);
@@@ -1816,9 -1851,9 +1851,9 @@@ static int submit_eb_page(struct page *
  	if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
  		return submit_eb_subpage(page, wbc);
  
 -	spin_lock(&mapping->private_lock);
 +	spin_lock(&mapping->i_private_lock);
- 	if (!PagePrivate(page)) {
+ 	if (!folio_test_private(folio)) {
 -		spin_unlock(&mapping->private_lock);
 +		spin_unlock(&mapping->i_private_lock);
  		return 0;
  	}
  
@@@ -3062,10 -3097,10 +3097,10 @@@ static bool folio_range_has_eb(struct b
  {
  	struct btrfs_subpage *subpage;
  
- 	lockdep_assert_held(&page->mapping->i_private_lock);
 -	lockdep_assert_held(&folio->mapping->private_lock);
++	lockdep_assert_held(&folio->mapping->i_private_lock);
  
- 	if (PagePrivate(page)) {
- 		subpage = (struct btrfs_subpage *)page->private;
+ 	if (folio_test_private(folio)) {
+ 		subpage = folio_get_private(folio);
  		if (atomic_read(&subpage->eb_refs))
  			return true;
  		/*
@@@ -3084,15 -3119,15 +3119,15 @@@ static void detach_extent_buffer_folio(
  	const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
  
  	/*
- 	 * For mapped eb, we're going to change the page private, which should
+ 	 * For mapped eb, we're going to change the folio private, which should
 -	 * be done under the private_lock.
 +	 * be done under the i_private_lock.
  	 */
  	if (mapped)
- 		spin_lock(&page->mapping->i_private_lock);
 -		spin_lock(&folio->mapping->private_lock);
++		spin_lock(&folio->mapping->i_private_lock);
  
- 	if (!PagePrivate(page)) {
+ 	if (!folio_test_private(folio)) {
  		if (mapped)
- 			spin_unlock(&page->mapping->i_private_lock);
 -			spin_unlock(&folio->mapping->private_lock);
++			spin_unlock(&folio->mapping->i_private_lock);
  		return;
  	}
  
@@@ -3101,22 -3136,18 +3136,18 @@@
  		 * We do this since we'll remove the pages after we've
  		 * removed the eb from the radix tree, so we could race
  		 * and have this page now attached to the new eb.  So
- 		 * only clear page_private if it's still connected to
+ 		 * only clear folio if it's still connected to
  		 * this eb.
  		 */
- 		if (PagePrivate(page) &&
- 		    page->private == (unsigned long)eb) {
+ 		if (folio_test_private(folio) && folio_get_private(folio) == eb) {
  			BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
- 			BUG_ON(PageDirty(page));
- 			BUG_ON(PageWriteback(page));
- 			/*
- 			 * We need to make sure we haven't be attached
- 			 * to a new eb.
- 			 */
- 			detach_page_private(page);
+ 			BUG_ON(folio_test_dirty(folio));
+ 			BUG_ON(folio_test_writeback(folio));
+ 			/* We need to make sure we haven't be attached to a new eb. */
+ 			folio_detach_private(folio);
  		}
  		if (mapped)
- 			spin_unlock(&page->mapping->i_private_lock);
 -			spin_unlock(&folio->mapping->private_lock);
++			spin_unlock(&folio->mapping->i_private_lock);
  		return;
  	}
  
@@@ -3130,16 -3161,16 +3161,16 @@@
  		return;
  	}
  
- 	btrfs_page_dec_eb_refs(fs_info, page);
+ 	btrfs_folio_dec_eb_refs(fs_info, folio);
  
  	/*
- 	 * We can only detach the page private if there are no other ebs in the
+ 	 * We can only detach the folio private if there are no other ebs in the
  	 * page range and no unfinished IO.
  	 */
- 	if (!page_range_has_eb(fs_info, page))
- 		btrfs_detach_subpage(fs_info, page);
+ 	if (!folio_range_has_eb(fs_info, folio))
+ 		btrfs_detach_subpage(fs_info, folio);
  
- 	spin_unlock(&page->mapping->i_private_lock);
 -	spin_unlock(&folio->mapping->private_lock);
++	spin_unlock(&folio->mapping->i_private_lock);
  }
  
  /* Release all pages attached to the extent buffer */
@@@ -3516,11 -3603,9 +3603,9 @@@ struct extent_buffer *alloc_extent_buff
  
  	btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
  
- 	num_pages = num_extent_pages(eb);
- 
  	/*
- 	 * Preallocate page->private for subpage case, so that we won't
+ 	 * Preallocate folio private for subpage case, so that we won't
 -	 * allocate memory with private_lock nor page lock hold.
 +	 * allocate memory with i_private_lock nor page lock hold.
  	 *
  	 * The memory will be freed by attach_extent_buffer_page() or freed
  	 * manually if we exit earlier.
@@@ -3533,26 -3618,61 +3618,61 @@@
  		}
  	}
  
- 	for (i = 0; i < num_pages; i++, index++) {
- 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
- 		if (!p) {
- 			exists = ERR_PTR(-ENOMEM);
- 			btrfs_free_subpage(prealloc);
- 			goto free_eb;
+ reallocate:
+ 	/* Allocate all pages first. */
+ 	ret = alloc_eb_folio_array(eb, __GFP_NOFAIL);
+ 	if (ret < 0) {
+ 		btrfs_free_subpage(prealloc);
+ 		goto out;
+ 	}
+ 
+ 	num_folios = num_extent_folios(eb);
+ 	/* Attach all pages to the filemap. */
+ 	for (int i = 0; i < num_folios; i++) {
+ 		struct folio *folio;
+ 
+ 		ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+ 		if (ret > 0) {
+ 			ASSERT(existing_eb);
+ 			goto out;
  		}
  
- 		spin_lock(&mapping->i_private_lock);
- 		exists = grab_extent_buffer(fs_info, p);
- 		if (exists) {
- 			spin_unlock(&mapping->i_private_lock);
- 			unlock_page(p);
- 			put_page(p);
- 			mark_extent_buffer_accessed(exists, p);
- 			btrfs_free_subpage(prealloc);
- 			goto free_eb;
+ 		/*
+ 		 * TODO: Special handling for a corner case where the order of
+ 		 * folios mismatch between the new eb and filemap.
+ 		 *
+ 		 * This happens when:
+ 		 *
+ 		 * - the new eb is using higher order folio
+ 		 *
+ 		 * - the filemap is still using 0-order folios for the range
+ 		 *   This can happen at the previous eb allocation, and we don't
+ 		 *   have higher order folio for the call.
+ 		 *
+ 		 * - the existing eb has already been freed
+ 		 *
+ 		 * In this case, we have to free the existing folios first, and
+ 		 * re-allocate using the same order.
+ 		 * Thankfully this is not going to happen yet, as we're still
+ 		 * using 0-order folios.
+ 		 */
+ 		if (unlikely(ret == -EAGAIN)) {
+ 			ASSERT(0);
+ 			goto reallocate;
  		}
+ 		attached++;
+ 
+ 		/*
+ 		 * Only after attach_eb_folio_to_filemap(), eb->folios[] is
+ 		 * reliable, as we may choose to reuse the existing page cache
+ 		 * and free the allocated page.
+ 		 */
+ 		folio = eb->folios[i];
+ 		eb->folio_size = folio_size(folio);
+ 		eb->folio_shift = folio_shift(folio);
 -		spin_lock(&mapping->private_lock);
++		spin_lock(&mapping->i_private_lock);
  		/* Should not fail, as we have preallocated the memory */
- 		ret = attach_extent_buffer_page(eb, p, prealloc);
+ 		ret = attach_extent_buffer_folio(eb, folio, prealloc);
  		ASSERT(!ret);
  		/*
  		 * To inform we have extra eb under allocation, so that
@@@ -3563,12 -3683,21 +3683,21 @@@
  		 * detach_extent_buffer_page().
  		 * Thus needs no special handling in error path.
  		 */
- 		btrfs_page_inc_eb_refs(fs_info, p);
+ 		btrfs_folio_inc_eb_refs(fs_info, folio);
 -		spin_unlock(&mapping->private_lock);
 +		spin_unlock(&mapping->i_private_lock);
  
- 		WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
- 		eb->pages[i] = p;
- 		if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
+ 		WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
+ 
+ 		/*
+ 		 * Check if the current page is physically contiguous with previous eb
+ 		 * page.
+ 		 * At this stage, either we allocated a large folio, thus @i
+ 		 * would only be 0, or we fall back to per-page allocation.
+ 		 */
+ 		if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
+ 			page_contig = false;
+ 
+ 		if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
  			uptodate = 0;
  
  		/*
@@@ -4566,11 -4773,11 +4773,11 @@@ static int try_release_subpage_extent_b
  		release_extent_buffer(eb);
  	}
  	/*
- 	 * Finally to check if we have cleared page private, as if we have
- 	 * released all ebs in the page, the page private should be cleared now.
+ 	 * Finally to check if we have cleared folio private, as if we have
+ 	 * released all ebs in the page, the folio private should be cleared now.
  	 */
 -	spin_lock(&page->mapping->private_lock);
 +	spin_lock(&page->mapping->i_private_lock);
- 	if (!PagePrivate(page))
+ 	if (!folio_test_private(page_folio(page)))
  		ret = 1;
  	else
  		ret = 0;
@@@ -4587,12 -4795,12 +4795,12 @@@ int try_release_extent_buffer(struct pa
  		return try_release_subpage_extent_buffer(page);
  
  	/*
- 	 * We need to make sure nobody is changing page->private, as we rely on
- 	 * page->private as the pointer to extent buffer.
+ 	 * We need to make sure nobody is changing folio private, as we rely on
+ 	 * folio private as the pointer to extent buffer.
  	 */
 -	spin_lock(&page->mapping->private_lock);
 +	spin_lock(&page->mapping->i_private_lock);
- 	if (!PagePrivate(page)) {
+ 	if (!folio_test_private(folio)) {
 -		spin_unlock(&page->mapping->private_lock);
 +		spin_unlock(&page->mapping->i_private_lock);
  		return 1;
  	}
  
