Hello Eugen,

The output below shows that all daemons are configured. I would also like to know whether it is possible to remove those RGWs and redeploy them, to see if that changes anything (a rough sketch of what I have in mind is at the bottom of this message).

root@ceph-mon1:~# ceph service dump
{
    "epoch": 1740,
    "modified": "2023-02-06T15:21:42.235595+0200",
    "services": {
        "rgw": {
            "daemons": {
                "summary": "",
                "479626": {
                    "start_epoch": 1265,
                    "start_stamp": "2023-02-03T11:41:58.680359+0200",
                    "gid": 479626,
                    "addr": "10.10.110.199:0/1880864062",
                    "metadata": {
                        "arch": "x86_64",
                        "ceph_release": "pacific",
                        "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)",
                        "ceph_version_short": "16.2.11",
                        "cpu": "Intel(R) Xeon(R) Gold 5215 CPU @ 2.50GHz",
                        "distro": "ubuntu",
                        "distro_description": "Ubuntu 20.04.5 LTS",
                        "distro_version": "20.04",
                        "frontend_config#0": "beast endpoint= 10.10.110.199:8080",
                        "frontend_type#0": "beast",
                        "hostname": "ceph-osd1",
                        "id": "ceph-osd1.rgw0",
                        "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023",
                        "kernel_version": "5.4.0-137-generic",
                        "mem_swap_kb": "8388604",
                        "mem_total_kb": "263556752",
                        "num_handles": "1",
                        "os": "Linux",
                        "pid": "47369",
                        "realm_id": "",
                        "realm_name": "",
                        "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b",
                        "zone_name": "default",
                        "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015",
                        "zonegroup_name": "default"
                    },
                    "task_status": {}
                },
                "489542": {
                    "start_epoch": 1267,
                    "start_stamp": "2023-02-03T11:42:30.711278+0200",
                    "gid": 489542,
                    "addr": "10.10.110.200:0/3909810130",
                    "metadata": {
                        "arch": "x86_64",
                        "ceph_release": "pacific",
                        "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)",
                        "ceph_version_short": "16.2.11",
                        "cpu": "Intel(R) Xeon(R) Gold 5215 CPU @ 2.50GHz",
                        "distro": "ubuntu",
                        "distro_description": "Ubuntu 20.04.5 LTS",
                        "distro_version": "20.04",
                        "frontend_config#0": "beast endpoint= 10.10.110.200:8080",
                        "frontend_type#0": "beast",
                        "hostname": "ceph-osd2",
                        "id": "ceph-osd2.rgw0",
                        "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023",
                        "kernel_version": "5.4.0-137-generic",
                        "mem_swap_kb": "8388604",
                        "mem_total_kb": "263556752",
                        "num_handles": "1",
                        "os": "Linux",
                        "pid": "392257",
                        "realm_id": "",
                        "realm_name": "",
                        "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b",
                        "zone_name": "default",
                        "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015",
                        "zonegroup_name": "default"
                    },
                    "task_status": {}
                },
                "489605": {
                    "start_epoch": 1268,
                    "start_stamp": "2023-02-03T11:42:58.724973+0200",
                    "gid": 489605,
                    "addr": "10.10.110.201:0/59797695",
                    "metadata": {
                        "arch": "x86_64",
                        "ceph_release": "pacific",
                        "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)",
                        "ceph_version_short": "16.2.11",
                        "cpu": "Intel(R) Xeon(R) Gold 5215 CPU @ 2.50GHz",
                        "distro": "ubuntu",
                        "distro_description": "Ubuntu 20.04.5 LTS",
                        "distro_version": "20.04",
                        "frontend_config#0": "beast endpoint= 10.10.110.201:8080",
                        "frontend_type#0": "beast",
                        "hostname": "ceph-osd3",
                        "id": "ceph-osd3.rgw0",
                        "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023",
                        "kernel_version": "5.4.0-137-generic",
                        "mem_swap_kb": "8388604",
                        "mem_total_kb": "263556752",
                        "num_handles": "1",
                        "os": "Linux",
                        "pid": "389458",
                        "realm_id": "",
                        "realm_name": "",
                        "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b",
                        "zone_name": "default",
                        "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015",
                        "zonegroup_name": "default"
                    },
                    "task_status": {}
                },
                "724322": {
                    "start_epoch": 1628,
                    "start_stamp": "2023-02-06T08:53:50.544397+0200",
                    "gid": 724322,
"10.10.110.198:0/3339657222", "metadata": { "arch": "x86_64", "ceph_release": "pacific", "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)", "ceph_version_short": "16.2.11", "cpu": "Intel(R) Xeon(R) Gold 6240 CPU @ 2.60GHz", "distro": "ubuntu", "distro_description": "Ubuntu 20.04.5 LTS", "distro_version": "20.04", "frontend_config#0": "beast endpoint= 10.10.110.198:8080", "frontend_type#0": "beast", "hostname": "ceph-mon3", "id": "ceph-mon3.rgw0", "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023", "kernel_version": "5.4.0-137-generic", "mem_swap_kb": "8388604", "mem_total_kb": "97474404", "num_handles": "1", "os": "Linux", "pid": "51926", "realm_id": "", "realm_name": "", "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b", "zone_name": "default", "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015", "zonegroup_name": "default" }, "task_status": {} }, "774174": { "start_epoch": 1711, "start_stamp": "2023-02-06T09:35:03.106044+0200", "gid": 774174, "addr": "10.10.110.196:0/3043825332", "metadata": { "arch": "x86_64", "ceph_release": "pacific", "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)", "ceph_version_short": "16.2.11", "cpu": "Intel(R) Xeon(R) Gold 6240 CPU @ 2.60GHz", "distro": "ubuntu", "distro_description": "Ubuntu 20.04.5 LTS", "distro_version": "20.04", "frontend_config#0": "beast endpoint= 10.10.110.196:8080", "frontend_type#0": "beast", "hostname": "ceph-mon1", "id": "ceph-mon1.rgw0", "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023", "kernel_version": "5.4.0-137-generic", "mem_swap_kb": "8388604", "mem_total_kb": "97474404", "num_handles": "1", "os": "Linux", "pid": "53265", "realm_id": "", "realm_name": "", "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b", "zone_name": "default", "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015", "zonegroup_name": "default" }, "task_status": {} }, "784198": { "start_epoch": 1714, "start_stamp": "2023-02-06T09:35:23.974089+0200", "gid": 784198, "addr": "10.10.110.197:0/2503354619", "metadata": { "arch": "x86_64", "ceph_release": "pacific", "ceph_version": "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)", "ceph_version_short": "16.2.11", "cpu": "Intel(R) Xeon(R) Gold 6240 CPU @ 2.60GHz", "distro": "ubuntu", "distro_description": "Ubuntu 20.04.5 LTS", "distro_version": "20.04", "frontend_config#0": "beast endpoint= 10.10.110.197:8080", "frontend_type#0": "beast", "hostname": "ceph-mon2", "id": "ceph-mon2.rgw0", "kernel_description": "#154-Ubuntu SMP Thu Jan 5 17:03:22 UTC 2023", "kernel_version": "5.4.0-137-generic", "mem_swap_kb": "8388604", "mem_total_kb": "97474408", "num_handles": "1", "os": "Linux", "pid": "152772", "realm_id": "", "realm_name": "", "zone_id": "689f9b30-4380-439e-8e7c-3c2046079a2b", "zone_name": "default", "zonegroup_id": "c2d060fe-bd6c-4bfb-a0cd-596124765015", "zonegroup_name": "default" }, "task_status": {} } } } } } root@ceph-mon1:~# Michel On Mon, Feb 6, 2023 at 3:32 PM Michel Niyoyita <micou12@xxxxxxxxx> wrote: > Hello Eugen > > Below are rgw configs and logs while I am accessing the dashboard : > > root@ceph-mon1:/var/log/ceph# tail -f /var/log/ceph/ceph-mgr.ceph-mon1.log > 2023-02-06T15:25:30.037+0200 7f68b15cd700 0 [prometheus INFO > cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - > [06/Feb/2023:15:25:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2" > 2023-02-06T15:25:45.033+0200 7f68b0dcc700 0 [prometheus INFO > 
> 2023-02-06T15:25:45.033+0200 7f68b1dce700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:127.0.0.1 - - [06/Feb/2023:15:25:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:25:45.037+0200 7f68b35d1700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - [06/Feb/2023:15:25:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:00.033+0200 7f68b3dd2700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:127.0.0.1 - - [06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:00.033+0200 7f68b25cf700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.133 - - [06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:00.037+0200 7f68b2dd0700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - [06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:15.033+0200 7f68afdca700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:127.0.0.1 - - [06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:15.033+0200 7f68b45d3700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.133 - - [06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:15.037+0200 7f68b05cb700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - [06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:30.033+0200 7f68b15cd700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:127.0.0.1 - - [06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:30.033+0200 7f68b0dcc700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.133 - - [06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:30.037+0200 7f68b1dce700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - [06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:45.033+0200 7f68b35d1700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:127.0.0.1 - - [06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:45.033+0200 7f68b3dd2700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.133 - - [06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
> 2023-02-06T15:26:45.037+0200 7f68b25cf700 0 [prometheus INFO cherrypy.access.140087714875184] ::ffff:10.10.110.134 - - [06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
>
> [mons]
> ceph-mon1
> ceph-mon2
> ceph-mon3
>
> [osds]
> ceph-osd1
> ceph-osd2
> ceph-osd3
>
> [mgrs]
> ceph-mon1
> ceph-mon2
> ceph-mon3
>
> [grafana-server]
> ceph-mon1
> ceph-mon2
> ceph-mon3
>
> [rgws]
> ceph-osd1
> ceph-osd2
> ceph-osd3
> ceph-mon1
> ceph-mon2
> ceph-mon3
>
> [rgwloadbalancers]
> ceph-osd1
> ceph-osd2
> ceph-osd3
> ceph-mon1
> ceph-mon2
> ceph-mon3
>
> ceph.conf:
>
> [client]
> rbd_default_features = 1
>
> [client.rgw.ceph-mon1.rgw0]
> host = ceph-mon1
> keyring = /var/lib/ceph/radosgw/ceph-rgw.ceph-mon1.rgw0/keyring
> log file = /var/log/ceph/ceph-rgw-ceph-mon1.rgw0.log
> rgw frontends = beast endpoint=10.10.110.198:8080
> rgw frontends = beast endpoint=10.10.110.196:8080
> rgw thread pool size = 512
>
> [client.rgw.ceph-osd1]
> rgw_dns_name = ceph-osd1
>
> [client.rgw.ceph-osd2]
> rgw_dns_name = ceph-osd2
>
> [client.rgw.ceph-osd3]
> rgw_dns_name = ceph-osd3
>
> # Please do not change this file directly since it is managed by Ansible and will be overwritten
> [global]
> auth_client_required = cephx
> auth_cluster_required = cephx
> auth_service_required = cephx
> cluster network = 10.10.110.128/26
> fsid = cb0caedc-eb5b-42d1-a34f-96facfda8c27
> mon host =
> mon initial members = ceph-mon1,ceph-mon2,ceph-mon3
> mon_allow_pool_delete = True
> mon_max_pg_per_osd = 400
> osd pool default crush rule = -1
> osd_pool_default_min_size = 2
> osd_pool_default_size = 3
> public network =
>
> Best Regards
>
> On Mon, Feb 6, 2023 at 3:13 PM Eugen Block <eblock@xxxxxx> wrote:
>
>> What does the active mgr log when you try to access the dashboard?
>> Please paste your rgw config settings as well.
>>
>> Quoting Michel Niyoyita <micou12@xxxxxxxxx>:
>>
>> > Hello Robert
>> >
>> > below is the output of ceph versions command
>> >
>> > root@ceph-mon1:~# ceph versions
>> > {
>> >     "mon": {
>> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 3
>> >     },
>> >     "mgr": {
>> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 3
>> >     },
>> >     "osd": {
>> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 48
>> >     },
>> >     "mds": {},
>> >     "rgw": {
>> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 6
>> >     },
>> >     "overall": {
>> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)": 60
>> >     }
>> > }
>> > root@ceph-mon1:~#
>> >
>> > Best Regards
>> >
>> > Michel
>> >
>> > On Mon, Feb 6, 2023 at 2:57 PM Robert Sander <r.sander@xxxxxxxxxxxxxxxxxxx> wrote:
>> >
>> >> On 06.02.23 13:48, Michel Niyoyita wrote:
>> >>
>> >> > root@ceph-mon1:~# ceph -v
>> >> > ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894) pacific (stable)
>> >>
>> >> This is the version of the command line tool "ceph".
>> >>
>> >> Please run "ceph versions" to show the version of the running Ceph daemons.
>> >>
>> >> Regards
>> >> --
>> >> Robert Sander
>> >> Heinlein Support GmbH
>> >> Linux: Akademie - Support - Hosting
>> >> http://www.heinlein-support.de
>> >>
>> >> Tel: 030-405051-43
>> >> Fax: 030-405051-19
>> >>
>> >> Zwangsangaben lt. §35a GmbHG:
>> >> HRB 93818 B / Amtsgericht Berlin-Charlottenburg,
>> >> Geschäftsführer: Peer Heinlein -- Sitz: Berlin
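P.S. Regarding removing and redeploying the RGWs: since this cluster is deployed with ceph-ansible (the ceph.conf above is marked as Ansible-managed and there is an [rgws] inventory group), what I have in mind is roughly the sketch below. The systemd unit name, data directory, playbook and inventory file names are assumptions based on ceph-ansible defaults and on the keyring path shown in ceph.conf, so they may need adjusting:

# on one RGW host at a time, stop and disable the running instance
# (example for ceph-mon1; repeat with the other hosts' instance names)
systemctl stop ceph-radosgw@rgw.ceph-mon1.rgw0
systemctl disable ceph-radosgw@rgw.ceph-mon1.rgw0

# optionally remove its cephx key and data directory so they are recreated cleanly
ceph auth del client.rgw.ceph-mon1.rgw0
rm -rf /var/lib/ceph/radosgw/ceph-rgw.ceph-mon1.rgw0

# then redeploy only the RGWs by re-running the ceph-ansible playbook,
# limited to the hosts in the [rgws] inventory group
ansible-playbook -i hosts site.yml --limit rgws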