Currently we forbid page_cache_tree_insert() from replacing exceptional
radix tree entries for DAX inodes. However, to make DAX faults race-free
we will lock radix tree entries, and when a hole is created we need to
replace such a locked radix tree entry with a hole page. So modify
page_cache_tree_insert() to allow that.

Reviewed-by: Ross Zwisler <ross.zwisler@xxxxxxxxxxxxxxx>
Signed-off-by: Jan Kara <jack@xxxxxxx>
---
 include/linux/dax.h |  1 +
 mm/filemap.c        | 21 ++++++++++++++-------
 2 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/include/linux/dax.h b/include/linux/dax.h
index d0883daa66d9..8558d3770a30 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -3,6 +3,7 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
 /* We use lowest available exceptional entry bit for locking */
diff --git a/mm/filemap.c b/mm/filemap.c
index f2479af09da9..dfe55c2cfb34 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -597,14 +597,21 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		if (!radix_tree_exceptional_entry(p))
 			return -EEXIST;
 
-		if (WARN_ON(dax_mapping(mapping)))
-			return -EINVAL;
-
-		if (shadowp)
-			*shadowp = p;
 		mapping->nrexceptional--;
-		if (node)
-			workingset_node_shadows_dec(node);
+		if (!dax_mapping(mapping)) {
+			if (shadowp)
+				*shadowp = p;
+			if (node)
+				workingset_node_shadows_dec(node);
+		} else {
+			/* DAX can replace empty locked entry with a hole */
+			WARN_ON_ONCE(p !=
+				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+					 RADIX_DAX_ENTRY_LOCK));
+			/* DAX accounts exceptional entries as normal pages */
+			if (node)
+				workingset_node_pages_dec(node);
+		}
 	}
 	radix_tree_replace_slot(slot, page);
 	mapping->nrpages++;
-- 
2.6.6
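
To illustrate the entry encoding that the new WARN_ON_ONCE() relies on,
here is a minimal, self-contained userspace sketch of what an "empty
locked" DAX radix tree entry looks like. The constant values mirror the
kernel definitions of this era (RADIX_TREE_EXCEPTIONAL_ENTRY is bit 1,
and this series defines RADIX_DAX_ENTRY_LOCK as the lowest free
exceptional-entry bit), but treat the exact values here as assumptions
for illustration rather than as the kernel ABI:

#include <stdio.h>

#define RADIX_TREE_EXCEPTIONAL_ENTRY	2UL	/* bit 1: non-page entry */
#define RADIX_DAX_ENTRY_LOCK		4UL	/* bit 2: assumed lock-bit value */

/*
 * An "empty locked" DAX entry carries no block information: only the
 * exceptional marker and the lock bit are set. This is exactly the
 * value the WARN_ON_ONCE() in page_cache_tree_insert() checks for
 * before the entry is replaced with a hole page.
 */
static void *empty_locked_dax_entry(void)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_ENTRY_LOCK);
}

int main(void)
{
	void *p = empty_locked_dax_entry();

	/*
	 * Prints 0x6 (bit 1 | bit 2): since no other state is encoded
	 * in the entry, replacing it with a real hole page loses nothing.
	 */
	printf("empty locked DAX entry = %#lx\n", (unsigned long)p);
	return 0;
}

The point of the check is that only this one value is safe to replace:
any other exceptional entry would still encode sector/size state that
must not be silently dropped.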