Re: Removing Rados Gateway in ceph cluster

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Hello Eugen

Below are the rgw configs and logs captured while I am accessing the dashboard:

root@ceph-mon1:/var/log/ceph# tail -f /var/log/ceph/ceph-mgr.ceph-mon1.log
2023-02-06T15:25:30.037+0200 7f68b15cd700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:25:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:25:45.033+0200 7f68b0dcc700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.133 - -
[06/Feb/2023:15:25:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:25:45.033+0200 7f68b1dce700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:127.0.0.1 - -
[06/Feb/2023:15:25:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:25:45.037+0200 7f68b35d1700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:25:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:00.033+0200 7f68b3dd2700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:127.0.0.1 - -
[06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:00.033+0200 7f68b25cf700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.133 - -
[06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:00.037+0200 7f68b2dd0700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:26:00] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:15.033+0200 7f68afdca700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:127.0.0.1 - -
[06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:15.033+0200 7f68b45d3700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.133 - -
[06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:15.037+0200 7f68b05cb700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:26:15] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:30.033+0200 7f68b15cd700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:127.0.0.1 - -
[06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:30.033+0200 7f68b0dcc700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.133 - -
[06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:30.037+0200 7f68b1dce700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:26:30] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:45.033+0200 7f68b35d1700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:127.0.0.1 - -
[06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:45.033+0200 7f68b3dd2700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.133 - -
[06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"
2023-02-06T15:26:45.037+0200 7f68b25cf700  0 [prometheus INFO
cherrypy.access.140087714875184] ::ffff:10.10.110.134 - -
[06/Feb/2023:15:26:45] "GET /metrics HTTP/1.1" 200 - "" "Prometheus/2.7.2"



[mons]
ceph-mon1
ceph-mon2
ceph-mon3

[osds]
ceph-osd1
ceph-osd2
ceph-osd3

[mgrs]
ceph-mon1
ceph-mon2
ceph-mon3

[grafana-server]
ceph-mon1
ceph-mon2
ceph-mon3

[rgws]
ceph-osd1
ceph-osd2
ceph-osd3
ceph-mon1
ceph-mon2
ceph-mon3

[rgwloadbalancers]
ceph-osd1
ceph-osd2
ceph-osd3
ceph-mon1
ceph-mon2
ceph-mon3


ceph.conf:

[client]
rbd_default_features = 1

[client.rgw.ceph-mon1.rgw0]
host = ceph-mon1
keyring = /var/lib/ceph/radosgw/ceph-rgw.ceph-mon1.rgw0/keyring
log file = /var/log/ceph/ceph-rgw-ceph-mon1.rgw0.log
rgw frontends = beast endpoint=10.10.110.198:8080
rgw frontends = beast endpoint=10.10.110.196:8080
rgw thread pool size = 512

[client.rgw.ceph-osd1]
rgw_dns_name = ceph-osd1

[client.rgw.ceph-osd2]
rgw_dns_name = ceph-osd2

[client.rgw.ceph-osd3]
rgw_dns_name = ceph-osd3

# Please do not change this file directly since it is managed by Ansible
# and will be overwritten
[global]
auth_client_required = cephx
auth_cluster_required = cephx
auth_service_required = cephx
cluster network = 10.10.110.128/26
fsid = cb0caedc-eb5b-42d1-a34f-96facfda8c27
mon host =
mon initial members = ceph-mon1,ceph-mon2,ceph-mon3
mon_allow_pool_delete = True
mon_max_pg_per_osd = 400
osd pool default crush rule = -1
osd_pool_default_min_size = 2
osd_pool_default_size = 3
public network =


Best Regards








On Mon, Feb 6, 2023 at 3:13 PM Eugen Block <eblock@xxxxxx> wrote:

> What does the active mgr log when you try to access the dashboard?
> Please paste your rgw config settings as well.
>
> Zitat von Michel Niyoyita <micou12@xxxxxxxxx>:
>
> > Hello Robert
> >
> > below is the output of ceph versions command
> >
> > root@ceph-mon1:~# ceph versions
> > {
> >     "mon": {
> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> > pacific (stable)": 3
> >     },
> >     "mgr": {
> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> > pacific (stable)": 3
> >     },
> >     "osd": {
> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> > pacific (stable)": 48
> >     },
> >     "mds": {},
> >     "rgw": {
> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> > pacific (stable)": 6
> >     },
> >     "overall": {
> >         "ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> > pacific (stable)": 60
> >     }
> > }
> > root@ceph-mon1:~#
> >
> > Best Regards
> >
> > Michel
> >
> > On Mon, Feb 6, 2023 at 2:57 PM Robert Sander <
> r.sander@xxxxxxxxxxxxxxxxxxx>
> > wrote:
> >
> >> On 06.02.23 13:48, Michel Niyoyita wrote:
> >>
> >> > root@ceph-mon1:~# ceph -v
> >> > ceph version 16.2.11 (3cf40e2dca667f68c6ce3ff5cd94f01e711af894)
> pacific
> >> > (stable)
> >>
> >> This is the version of the command line tool "ceph".
> >>
> >> Please run "ceph versions" to show the version of the running Ceph
> daemons.
> >>
> >> Regards
> >> --
> >> Robert Sander
> >> Heinlein Support GmbH
> >> Linux: Akademie - Support - Hosting
> >> http://www.heinlein-support.de
> >>
> >> Tel: 030-405051-43
> >> Fax: 030-405051-19
> >>
> >> Zwangsangaben lt. §35a GmbHG:
> >> HRB 93818 B / Amtsgericht Berlin-Charlottenburg,
> >> Geschäftsführer: Peer Heinlein  -- Sitz: Berlin
> >> _______________________________________________
> >> ceph-users mailing list -- ceph-users@xxxxxxx
> >> To unsubscribe send an email to ceph-users-leave@xxxxxxx
> >>
> > _______________________________________________
> > ceph-users mailing list -- ceph-users@xxxxxxx
> > To unsubscribe send an email to ceph-users-leave@xxxxxxx
>
>
> _______________________________________________
> ceph-users mailing list -- ceph-users@xxxxxxx
> To unsubscribe send an email to ceph-users-leave@xxxxxxx
>
_______________________________________________
ceph-users mailing list -- ceph-users@xxxxxxx
To unsubscribe send an email to ceph-users-leave@xxxxxxx




[Index of Archives]     [Information on CEPH]     [Linux Filesystem Development]     [Ceph Development]     [Ceph Large]     [Ceph Dev]     [Linux USB Development]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [xfs]


  Powered by Linux