I have mapped port 32505 to 23860; however, when connecting via s3cmd it fails with
"ERROR: S3 Temporary Error: Request failed for: /. Please try again later."
Has anyone encountered the same issue?

[root@vm-04 ~]# s3cmd ls
WARNING: Retrying failed request: / ('')
WARNING: Waiting 3 sec...
WARNING: Retrying failed request: / ('')
WARNING: Waiting 6 sec...
WARNING: Retrying failed request: / ('')
WARNING: Waiting 9 sec...
WARNING: Retrying failed request: / ('')
WARNING: Waiting 12 sec...
WARNING: Retrying failed request: / ('')
WARNING: Waiting 15 sec...
ERROR: S3 Temporary Error: Request failed for: /. Please try again later.
[root@vm-04 ~]#
[root@vm-04 ~]# cat .s3cfg
[default]
access_key = E9BJAC6QKLTOKVJR4TZC
secret_key = KBvm0YqeAFtVM0I1gUhX5wrnwpHZZ5SLhiUa56ss
host_base = xxxxxx:23860
host_bucket = xxxxxx:23860/bucket-king
use_https = False
signature_v2 = False
[root@vm-04 ~]#

bash-4.4$ ceph -s
  cluster:
    id:     dd6a082e-868d-44e0-b93b-0f4956e5dd1a
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum a,b,c (age 4m)
    mgr: b(active, since 4m), standbys: a
    mds: 1/1 daemons up, 1 standby
    osd: 3 osds: 3 up (since 4m), 3 in (since 26m)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   14 pools, 233 pgs
    objects: 548 objects, 808 MiB
    usage:   2.4 GiB used, 148 GiB / 150 GiB avail
    pgs:     233 active+clean

bash-4.4$

[root@vm-01 ~]# kubectl get svc -nrook-ceph
NAME                                     TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE
rook-ceph-mgr                            ClusterIP   10.105.165.62    <none>        9283/TCP         26m
rook-ceph-mgr-dashboard                  ClusterIP   10.102.18.178    <none>        8443/TCP         26m
rook-ceph-mgr-dashboard-external-https   NodePort    10.100.66.254    <none>        8443:31091/TCP   24m
rook-ceph-rgw-my-store                   ClusterIP   10.104.146.147   <none>        80/TCP           16m
rook-ceph-rgw-my-store-external          NodePort    10.106.39.241    <none>        80:32505/TCP     14m
[root@vm-01 ~]#
[root@vm-01 ~]# kubectl get pods -nrook-ceph | grep rgw
rook-ceph-rgw-my-store-a-67d579d99b-5xd64   2/2     Running   0          5m22s
[root@vm-01 ~]#
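A quick sanity check, independent of s3cmd, would be to curl RGW directly (xxxxxx is the same masked host I use in host_base; <node-ip> is just a placeholder for any of the cluster nodes). If the forwarding works, RGW should answer with an HTTP response and an XML body (typically an empty ListAllMyBucketsResult for an anonymous request); an empty reply or a connection reset would point at the 32505 -> 23860 mapping rather than at RGW itself:

# via the mapped port that s3cmd is configured for
curl -v http://xxxxxx:23860/

# via the NodePort directly on a cluster node
curl -v http://<node-ip>:32505/

For reference, the RGW pod's recent log: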
[root@vm-01 ~]# kubectl -nrook-ceph logs rook-ceph-rgw-my-store-a-67d579d99b-5xd64 | tail -20
Defaulted container "rgw" out of: rgw, log-collector, chown-container-data-dir (init)
debug 2024-04-11T13:42:33.635+0000 7fee46b43700 1 beast: 0x7fee079c2730: 192.168.100.9 - rook-ceph-internal-s3-user-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 [11/Apr/2024:13:42:33.630 +0000] "DELETE /rook-ceph-bucket-checker-12e1356a-bbec-4ba6-a333-597842bf6f94/rookHealthCheckTestObject HTTP/1.1" 204 0 - "aws-sdk-go/1.44.118 (go1.19.4; linux; amd64)" - latency=0.005000210s
debug 2024-04-11T13:42:33.635+0000 7fee4e352700 1 ====== starting new request req=0x7fee079c2730 =====
debug 2024-04-11T13:42:33.647+0000 7fee4333c700 1 ====== req done req=0x7fee079c2730 op status=0 http_status=200 latency=0.012000503s ======
debug 2024-04-11T13:42:33.647+0000 7fee4333c700 1 beast: 0x7fee079c2730: 192.168.100.9 - rook-ceph-internal-s3-user-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 [11/Apr/2024:13:42:33.635 +0000] "PUT /rook-ceph-bucket-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 HTTP/1.1" 200 0 - "aws-sdk-go/1.44.118 (go1.19.4; linux; amd64)" - latency=0.012000503s
debug 2024-04-11T13:42:33.648+0000 7fee4e352700 1 ====== starting new request req=0x7fee079c2730 =====
debug 2024-04-11T13:42:33.666+0000 7fee3b32c700 1 ====== req done req=0x7fee079c2730 op status=0 http_status=200 latency=0.018000754s ======
debug 2024-04-11T13:42:33.666+0000 7fee3b32c700 1 beast: 0x7fee079c2730: 192.168.100.9 - rook-ceph-internal-s3-user-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 [11/Apr/2024:13:42:33.648 +0000] "PUT /rook-ceph-bucket-checker-12e1356a-bbec-4ba6-a333-597842bf6f94/rookHealthCheckTestObject HTTP/1.1" 200 21 - "aws-sdk-go/1.44.118 (go1.19.4; linux; amd64)" - latency=0.018000754s
debug 2024-04-11T13:42:33.667+0000 7fee4e352700 1 ====== starting new request req=0x7fee079c2730 =====
debug 2024-04-11T13:42:33.669+0000 7fee4e352700 1 ====== req done req=0x7fee079c2730 op status=0 http_status=200 latency=0.002000083s ======
debug 2024-04-11T13:42:33.669+0000 7fee4e352700 1 beast: 0x7fee079c2730: 192.168.100.9 - rook-ceph-internal-s3-user-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 [11/Apr/2024:13:42:33.667 +0000] "GET /rook-ceph-bucket-checker-12e1356a-bbec-4ba6-a333-597842bf6f94/rookHealthCheckTestObject HTTP/1.1" 200 21 - "aws-sdk-go/1.44.118 (go1.19.4; linux; amd64)" - latency=0.002000083s
debug 2024-04-11T13:42:33.710+0000 7fee43b3d700 1 ====== starting new request req=0x7fee079c2730 =====
debug 2024-04-11T13:42:33.719+0000 7fee2e312700 1 ====== req done req=0x7fee079c2730 op status=0 http_status=204 latency=0.009000377s ======
debug 2024-04-11T13:42:33.719+0000 7fee2e312700 1 beast: 0x7fee079c2730: 192.168.100.9 - rook-ceph-internal-s3-user-checker-12e1356a-bbec-4ba6-a333-597842bf6f94 [11/Apr/2024:13:42:33.710 +0000] "DELETE /rook-ceph-bucket-checker-12e1356a-bbec-4ba6-a333-597842bf6f94/rookHealthCheckTestObject HTTP/1.1" 204 0 - "aws-sdk-go/1.44.118 (go1.19.4; linux; amd64)" - latency=0.009000377s
debug 2024-04-11T13:42:37.863+0000 7fee26b03700 1 ====== starting new request req=0x7fee078c0730 =====
debug 2024-04-11T13:42:37.863+0000 7fee26b03700 1 ====== req done req=0x7fee078c0730 op status=0 http_status=200 latency=0.000000000s ======
debug 2024-04-11T13:42:37.863+0000 7fee26b03700 1 beast: 0x7fee078c0730: 192.168.100.8 - - [11/Apr/2024:13:42:37.863 +0000] "GET /swift/healthcheck HTTP/1.1" 200 0 - "kube-probe/1.28" - latency=0.000000000s
debug 2024-04-11T13:42:41.078+0000 7fef3a54b700 0 rgw UsageLogger: WARNING: RGWRados::log_usage(): user name empty (bucket=), skipping
debug 2024-04-11T13:42:47.862+0000 7fee24aff700 1 ====== starting new request req=0x7fee07a43730 =====
debug 2024-04-11T13:42:47.862+0000 7fee24aff700 1 ====== req done req=0x7fee07a43730 op status=0 http_status=200 latency=0.000000000s ======
debug 2024-04-11T13:42:47.862+0000 7fee24aff700 1 beast: 0x7fee07a43730: 192.168.100.8 - - [11/Apr/2024:13:42:47.862 +0000] "GET /swift/healthcheck HTTP/1.1" 200 0 - "kube-probe/1.28" - latency=0.000000000s
[root@vm-01 ~]#
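From the tail above I only see requests from the internal rook health-check user (192.168.100.9) and the kube-probe /swift/healthcheck probes (192.168.100.8), nothing that looks like my s3cmd attempts from vm-04. To confirm whether the request ever reaches the gateway, one option would be to follow the RGW log while retrying s3cmd with debug output, e.g.:

# on vm-01: watch incoming requests in the rgw container, hiding the Swift health checks
kubectl -nrook-ceph logs -f rook-ceph-rgw-my-store-a-67d579d99b-5xd64 -c rgw | grep -v healthcheck

# on vm-04: retry with debug output to see which host and port s3cmd actually connects to
s3cmd -d ls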