The current implementation assumes that all servers support 64-bit file
sizes. This is not always true. Use the FATTR4_MAXFILESIZE attribute to
discover the supported maximum. Update the LOCK6 test to use it.

Signed-off-by: Tigran Mkrtchyan <tigran.mkrtchyan@xxxxxxx>
---
 nfs4.0/nfs4lib.py             | 6 ++++++
 nfs4.0/servertests/st_lock.py | 3 ++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/nfs4.0/nfs4lib.py b/nfs4.0/nfs4lib.py
index 5031feb..41870dc 100644
--- a/nfs4.0/nfs4lib.py
+++ b/nfs4.0/nfs4lib.py
@@ -587,6 +587,12 @@ class NFS4Client(rpc.RPCClient, nfs4_ops.NFS4Operations):
         d = self.do_getattrdict([], [FATTR4_LEASE_TIME])
         return d[FATTR4_LEASE_TIME]
 
+    def getMaxFileSize(self):
+        """Get maximum supported file size"""
+        d = self.do_getattrdict([], [FATTR4_MAXFILESIZE])
+        return d[FATTR4_MAXFILESIZE]
+
+
     def create_obj(self, path, type=NF4DIR, attrs={FATTR4_MODE:0755},
                    linkdata="/etc/X11"):
         if __builtins__['type'](path) is str:
diff --git a/nfs4.0/servertests/st_lock.py b/nfs4.0/servertests/st_lock.py
index d54614d..80518f4 100644
--- a/nfs4.0/servertests/st_lock.py
+++ b/nfs4.0/servertests/st_lock.py
@@ -189,8 +189,9 @@ def testLenTooLong(t, env):
     """
     c = env.c1
     c.init_connection()
+    max_size = c.getMaxFileSize()
     fh, stateid = c.create_confirm(t.code)
-    res = c.lock_file(t.code, fh, stateid, 100, 0xfffffffffffffffe)
+    res = c.lock_file(t.code, fh, stateid, 100, max_size)
     check(res, NFS4ERR_INVAL, "LOCK with offset+len overflow")
 
 def testNoFh(t, env):
-- 
2.5.0
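
One caveat worth noting: FATTR4_MAXFILESIZE is a recommended attribute, so
not every server reports it, and d[FATTR4_MAXFILESIZE] may then raise a
KeyError. Below is a minimal sketch of a more defensive variant, assuming
the do_getattrdict() behaviour used above (a dict keyed by attribute number
that omits attributes the server did not return); the helper name and
fallback value are illustrative only, not part of this patch:

    # Illustrative sketch, not part of the patch.  Assumes
    # NFS4Client.do_getattrdict() returns a dict keyed by attribute
    # number that simply omits attributes the server did not supply.
    def get_max_filesize_or_default(client, default=0xffffffffffffffff):
        """Return FATTR4_MAXFILESIZE, or `default` when the server does
        not report this (recommended, not mandatory) attribute."""
        d = client.do_getattrdict([], [FATTR4_MAXFILESIZE])
        return d.get(FATTR4_MAXFILESIZE, default)

With a fallback along these lines, testLenTooLong could keep behaviour close
to the old hard-coded length on servers that omit the attribute, while still
using the advertised maximum where one is available.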