Hi Frank,
Thanks for your debug logs. I've improved your patch a bit; could you give it a try?
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 06fdb3d05095..75bd46d0291b 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1194,7 +1194,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 		}
 
 		idx = 0;
-		left = ret > 0 ? ret : 0;
+		left = ret > 0 ? umin(ret, i_size) : 0;
 		while (left > 0) {
 			size_t plen, copied;
 
@@ -1223,15 +1223,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
 	}
 
 	if (ret > 0) {
-		if (off > *ki_pos) {
-			if (off >= i_size) {
-				*retry_op = CHECK_EOF;
-				ret = i_size - *ki_pos;
-				*ki_pos = i_size;
-			} else {
-				ret = off - *ki_pos;
-				*ki_pos = off;
-			}
+		if (off >= i_size) {
+			*retry_op = CHECK_EOF;
+			ret = i_size - *ki_pos;
+			*ki_pos = i_size;
+		} else {
+			ret = off - *ki_pos;
+			*ki_pos = off;
 		}
 
 		if (last_objver)
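
The idea is to clamp `left` before the copy loop, so copy_page_to_iter() never consumes more of the iter than the current i_size allows, and a later retry still sees the remaining length. A rough userspace model of that clamping (plain C for illustration only; copy_capped and the hard-coded sizes are made up, not kernel code):

#include <stdio.h>
#include <string.h>

static size_t umin(size_t a, size_t b)
{
	return a < b ? a : b;
}

/*
 * Model of the capped copy: the OSD may return more bytes than the
 * locally cached i_size, but only up to i_size is consumed, so the
 * destination iterator keeps its remaining count for a retry.
 */
static size_t copy_capped(char *dst, const char *src,
			  long ret, size_t i_size)
{
	size_t left = ret > 0 ? umin((size_t)ret, i_size) : 0;

	memcpy(dst, src, left);
	return left;	/* bytes actually consumed from the iter */
}

int main(void)
{
	char osd_data[440] = { 0 };	/* what the OSD returned */
	char dst[440];

	/* Racing writer: the OSD has 440 bytes, local i_size is still 0. */
	printf("consumed %zu\n", copy_capped(dst, osd_data, 440, 0));
	/* Once i_size is refreshed to 440, the whole reply is consumed. */
	printf("consumed %zu\n", copy_capped(dst, osd_data, 440, 440));
	return 0;
}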
- Xiubo
On 1/24/24 11:25, Frank Hsiao 蕭法宣 wrote:
When multiple ceph kernel clients perform reads/writes on the same file, the read
operation (ceph_sync_read) returns EOF (ret = 0) even though the file has been
written by another client.
My environment uses Ceph Quincy (v17.2.6) and mounts CephFS with the ceph kernel
client. On the client side, I use Samba (v4.18.8) to export the folder as an SMB
share and test it with smbtorture. The failing test case is smb2.rw.rw1, with the
following message:
test: samba4.smb2.rw.rw1
Checking data integrity over 10 ops
read failed(NT_STATUS_END_OF_FILE)
failure: samba4.smb2.rw.rw1 [
Exception: read 0, expected 440
]
After some testing, I figured out that the failure only happens on Linux kernel
versions >= 5.16-rc1, specifically after commit
c3d8e0b5de487a7c462781745bc17694a4266696. Kernel logs below (on 5.16-rc1):
[Wed Jan 10 09:44:56 2024] [153221] ceph_read_iter:1559: ceph: aio_sync_read
00000000789dccee 100000010ef.fffffffffffffffe 0~440 got cap refs on Fr
[Wed Jan 10 09:44:56 2024] [153221] ceph_sync_read:852: ceph: sync_read on file
00000000d9e861fb 0~440
[Wed Jan 10 09:44:56 2024] [153221] ceph_sync_read:913: ceph: sync_read 0~440 got 440 i_size 0
[Wed Jan 10 09:44:56 2024] [153221] ceph_sync_read:966: ceph: sync_read result 0 retry_op 2
...
[Wed Jan 10 09:44:57 2024] [153221] ceph_read_iter:1559: ceph: aio_sync_read
00000000789dccee 100000010ef.fffffffffffffffe 0~440 got cap refs on Fr
[Wed Jan 10 09:44:57 2024] [153221] ceph_sync_read:852: ceph: sync_read on file
00000000d9e861fb 0~0
The logs indicate that:
1. ceph_sync_read may read data from the OSDs while the local i_size is stale in
a simultaneous read/write situation.
2. The commit in 5.16-rc1 caps ret to i_size and sets retry_op = CHECK_EOF.
3. When retrying, ceph_sync_read gets len = 0, since the iov count was already
consumed by copy_page_to_iter.
4. ceph_read_iter returns 0 (modeled in the sketch after this list).
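
If my understanding is right, the sequence can be modeled in plain C roughly as
below (hypothetical names; struct iter stands in for the iov_iter whose count
copy_page_to_iter() consumes):

#include <stdio.h>

/* Stand-in for the iov_iter: only the remaining byte count matters here. */
struct iter {
	size_t count;
};

/*
 * Model of one buggy ceph_sync_read() pass: it advances the iter by
 * everything the OSD returned, but reports at most i_size bytes and
 * asks the caller to retry (CHECK_EOF).
 */
static long sync_read(struct iter *to, size_t osd_ret, size_t i_size)
{
	size_t len = to->count;

	if (len == 0)
		return 0;	/* nothing left in the iter: looks like EOF */

	/* copy_page_to_iter() consumes the iter for all osd_ret bytes... */
	to->count -= osd_ret < len ? osd_ret : len;

	/* ...but the return value is capped at the stale i_size. */
	return osd_ret > i_size ? (long)i_size : (long)osd_ret;
}

int main(void)
{
	struct iter to = { .count = 440 };

	/* First pass: OSD returns 440 bytes, local i_size is still 0. */
	long first = sync_read(&to, 440, 0);
	/* Retry: i_size has caught up, but the iter is already empty. */
	long retry = sync_read(&to, 440, 440);

	printf("first=%ld retry=%ld\n", first, retry);	/* first=0 retry=0 */
	return 0;
}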
I'm not sure if my understanding is correct. For reference, here is my simple
patch; I'd appreciate more comments. Its purpose is to prevent the sync read
handler from copying pages when ret > i_size.
Thanks.
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 220a41831b46..5897f52ee998 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -926,6 +926,9 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 
 		idx = 0;
 		left = ret > 0 ? ret : 0;
+		if (left > i_size) {
+			left = i_size;
+		}
 		while (left > 0) {
 			size_t len, copied;
 			page_off = off & ~PAGE_MASK;
@@ -952,7 +955,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 			break;
 	}
 
-	if (off > iocb->ki_pos) {
+	if (off > iocb->ki_pos || i_size == 0) {
 		if (off >= i_size) {
 			*retry_op = CHECK_EOF;
 			ret = i_size - iocb->ki_pos;