Replace the hardcoded 2TB limit with a dynamic limit based on the block
size now that we have fixed the few overflows preventing operation with
large volumes.

Signed-off-by: Christoph Hellwig <hch@xxxxxxxxxx>

Index: linux-2.6/fs/hfsplus/super.c
===================================================================
--- linux-2.6.orig/fs/hfsplus/super.c	2011-02-15 13:45:06.292423835 +0100
+++ linux-2.6/fs/hfsplus/super.c	2011-02-15 13:54:53.598943828 +0100
@@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct sup
 	if (!sbi->rsrc_clump_blocks)
 		sbi->rsrc_clump_blocks = 1;
 
+	err = generic_check_addressable(sbi->alloc_blksz_shift,
+					sbi->total_blocks);
+	if (err) {
+		printk(KERN_ERR "hfs: filesystem size too large.\n");
+		goto out_free_vhdr;
+	}
+
 	/* Set up operations so we can load metadata */
 	sb->s_op = &hfsplus_sops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct sup
 		sb->s_flags |= MS_RDONLY;
 	}
 
+	err = -EINVAL;
+
 	/* Load metadata objects (B*Trees) */
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
Index: linux-2.6/fs/hfsplus/wrapper.c
===================================================================
--- linux-2.6.orig/fs/hfsplus/wrapper.c	2011-02-15 13:44:44.208420508 +0100
+++ linux-2.6/fs/hfsplus/wrapper.c	2011-02-15 13:45:03.883421555 +0100
@@ -138,10 +138,6 @@ int hfsplus_read_wrapper(struct super_bl
 
 	if (hfsplus_get_last_session(sb, &part_start, &part_size))
 		goto out;
-	if ((u64)part_start + part_size > 0x100000000ULL) {
-		pr_err("hfs: volumes larger than 2TB are not supported yet\n");
-		goto out;
-	}
 
 	error = -ENOMEM;
 	sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
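
A note on what the new limit works out to: generic_check_addressable()
(fs/libfs.c) refuses the mount when the volume's last allocation block is
not representable as a 512-byte sector in a sector_t, or its last page is
not representable in a pgoff_t. The userspace sketch below only models that
arithmetic; the 4k PAGE_SHIFT, the 64-bit sector_t, and the 32-bit pgoff_t
are assumptions matching a common 32-bit configuration, not taken from this
patch.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4k pages */

static int check_addressable(unsigned blocksize_bits, uint64_t num_blocks)
{
	uint64_t last_block = num_blocks - 1;
	uint64_t last_page;

	if (num_blocks == 0)
		return 0;
	if (blocksize_bits < 9 || blocksize_bits > PAGE_SHIFT)
		return -1;	/* kernel returns -EINVAL */

	last_page = last_block >> (PAGE_SHIFT - blocksize_bits);

	/* last 512-byte sector must fit in sector_t (modelled as u64) */
	if (last_block > (UINT64_MAX >> (blocksize_bits - 9)))
		return -2;	/* kernel returns -EFBIG */
	/* last page index must fit in pgoff_t (modelled as u32) */
	if (last_page > UINT32_MAX)
		return -2;
	return 0;
}

int main(void)
{
	/* 16TB of 4k blocks: last page index is exactly 2^32 - 1, still OK */
	printf("16TB/4k  -> %d\n", check_addressable(12, 1ULL << 32));
	/* one block more overflows the 32-bit pgoff_t modelled here */
	printf("16TB+4k  -> %d\n", check_addressable(12, (1ULL << 32) + 1));
	return 0;
}

Under these assumptions a 4k-block volume is capped at 16TB by the 32-bit
pgoff_t (2^32 pages of 4k), and a 64-bit sector_t allows far more; the
removed wrapper.c check rejected everything above 2^32 512-byte sectors
(2TB) regardless of block size.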