A Ceph pitfall: OSD deployment stalls caused by unsynchronized clocks

Posted on 2021-8-31 15:00:09
[root@compute01 deploy]# ceph status
  cluster:
    id:     31403b11-8a1e-432f-876e-5a2c852f9dcc
    health: HEALTH_WARN
            Degraded data redundancy: 13701/41103 objects degraded (33.333%), 432 pgs degraded, 640 pgs undersized
            13269 slow ops, oldest one blocked for 8178 sec, mon.compute02 has slow ops
            clock skew detected on mon.compute02

  services:
    mon: 3 daemons, quorum compute01,compute02,compute03 (age 2h)
    mgr: compute01(active, since 4h), standbys: compute03, compute02
    osd: 3 osds: 2 up (since 4h), 2 in (since 2h)

  data:
    pools:   6 pools, 640 pgs
    objects: 13.70k objects, 11 GiB
    usage:   24 GiB used, 2.7 TiB / 2.7 TiB avail
    pgs:     13701/41103 objects degraded (33.333%)
             432 active+undersized+degraded
             208 active+undersized
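The last warning line already names the culprit: the clock on mon.compute02 is skewed. Before touching any OSDs it is worth measuring how far apart the nodes actually are; a quick sketch, assuming chrony is the time daemon on these nodes (as it is on this cluster):

ceph time-sync-status    # skew and latency of each mon, as seen by the quorum
chronyc tracking         # on each node: the local daemon's own offset estimate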
[root@compute01 deploy]# ceph osd crush remove osd.0
removed item id 0 name 'osd.0' from crush map
[root@compute01 deploy]# ceph osd rm osd.0
removed osd.0
[root@compute01 deploy]# ceph auth del osd.0
updated
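These three commands strip osd.0 out of the crush map, the osd map, and the auth database. For reference, the usual full removal sequence (a sketch of the standard procedure, not literally what was typed here) also marks the OSD out and stops its daemon first:

ceph osd out osd.0             # stop mapping new data to it
systemctl stop ceph-osd@0      # on the host that carries osd.0
ceph osd crush remove osd.0    # drop it from the crush map
ceph auth del osd.0            # delete its cephx key
ceph osd rm osd.0              # remove it from the osd map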
[root@compute01 deploy]# history |grep sdb
 1023  history |grep sdb
[root@compute01 deploy]# history |grep ceph-deploy
 1024  history |grep ceph-deploy
[root@compute01 deploy]#  ceph-deploy osd create compute02 --data /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create compute02 --data /dev/sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  bluestore                     : None
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf object at 0x7fbd39643690>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  block_wal                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  journal                       : None
[ceph_deploy.cli][INFO  ]  subcommand                    : create
[ceph_deploy.cli][INFO  ]  host                          : compute02
[ceph_deploy.cli][INFO  ]  filestore                     : None
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fbd396711b8>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.cli][INFO  ]  data                          : /dev/sdb
[ceph_deploy.cli][INFO  ]  block_db                      : None
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[compute02][DEBUG ] connected to host: compute02
[compute02][DEBUG ] detect platform information from remote host
[compute02][DEBUG ] detect machine type
[compute02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to compute02
[compute02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[compute02][DEBUG ] find the location of an executable
[compute02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 8bb22eb2-d2e1-4ae4-aa52-cc96422b25bb

^CKilled by signal 2.
[ceph_deploy][ERROR ] KeyboardInterrupt
[root@compute01 deploy]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log
ceph.bootstrap-mgr.keyring  ceph.bootstrap-rgw.keyring  ceph.conf                  ceph.mon.keyring
[root@compute01 deploy]# pwd
/ceph/deploy
[root@compute01 deploy]# ceph-deploy osd create compute02 --data /dev/sdb  ^C
[root@compute01 deploy]#  ceph-deploy disk zap compute02 /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy disk zap compute02 /dev/sdb
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  debug                         : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : zap
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf object at 0x7f0db9e1dad0>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  host                          : compute02
[ceph_deploy.cli][INFO  ]  func                          : <function disk at 0x7f0db9de3230>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : ['/dev/sdb']
[ceph_deploy.osd][DEBUG ] zapping /dev/sdb on compute02
[compute02][DEBUG ] connected to host: compute02
[compute02][DEBUG ] detect platform information from remote host
[compute02][DEBUG ] detect machine type
[compute02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[compute02][DEBUG ] zeroing last few blocks of device
[compute02][DEBUG ] find the location of an executable
[compute02][INFO  ] Running command: /usr/sbin/ceph-volume lvm zap /dev/sdb
[compute02][WARNIN] --> Zapping: /dev/sdb
[compute02][WARNIN] --> --destroy was not specified, but zapping a whole device will remove the partition table
[compute02][WARNIN] Running command: /usr/bin/dd if=/dev/zero of=/dev/sdb bs=1M count=10 conv=fsync
[compute02][WARNIN]  stderr: 10+0 records in
[compute02][WARNIN] 10+0 records out
[compute02][WARNIN] 10485760 bytes (10 MB) copied
[compute02][WARNIN]  stderr: , 0.0850991 s, 123 MB/s
[compute02][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdb>
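As the warning says, a plain zap wipes the partition table but leaves any LVM volume group from a previous OSD in place. If a later create were to trip over leftover LVs (an assumption about this disk's state, not something hit here), the stronger form run directly on the host clears those as well:

ceph-volume lvm zap --destroy /dev/sdb    # also tears down VGs/LVs sitting on the device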
[root@compute01 deploy]#  ceph-deploy osd create compute02 --data /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[... same options and deploy output as the first attempt ...]
[compute02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 9f1fca57-1a1c-4d30-bad8-2d4578f88e96
^CKilled by signal 2.
[ceph_deploy][ERROR ] KeyboardInterrupt
[root@compute01 deploy]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log
ceph.bootstrap-mgr.keyring  ceph.bootstrap-rgw.keyring  ceph.conf                  ceph.mon.keyring
[root@compute01 deploy]# ceph-de
ceph-dencoder  ceph-deploy
[root@compute01 deploy]# ceph-deploy
usage: ceph-deploy [-h] [-v | -q] [--version] [--username USERNAME]
                   [--overwrite-conf] [--ceph-conf CEPH_CONF]
                   COMMAND ...

Easy Ceph deployment

    -^-
   /   \
   |O o|  ceph-deploy v2.0.1
   ).-.(
  '/|||\`
  | '|` |
    '|`

Full documentation can be found at: http://ceph.com/ceph-deploy/docs

optional arguments:
  -h, --help            show this help message and exit
  -v, --verbose         be more verbose
  -q, --quiet           be less verbose
  --version             the current installed version of ceph-deploy
  --username USERNAME   the username to connect to the remote host
  --overwrite-conf      overwrite an existing conf file on remote host (if
                        present)
  --ceph-conf CEPH_CONF
                        use (or reuse) a given ceph.conf file

commands:
  COMMAND               description
    new                 Start deploying a new cluster, and write a
                        CLUSTER.conf and keyring for it.
    install             Install Ceph packages on remote hosts.
    rgw                 Ceph RGW daemon management
    mgr                 Ceph MGR daemon management
    mds                 Ceph MDS daemon management
    mon                 Ceph MON Daemon management
    gatherkeys          Gather authentication keys for provisioning new nodes.
    disk                Manage disks on a remote host.
    osd                 Prepare a data disk on remote host.
    repo                Repo definition management
    admin               Push configuration and client.admin key to a remote
                        host.
    config              Copy ceph.conf to/from remote host(s)
    uninstall           Remove Ceph packages from remote hosts.
    purgedata           Purge (delete, destroy, discard, shred) any Ceph data
                        from /var/lib/ceph
    purge               Remove Ceph packages from remote hosts and purge all
                        data.
    forgetkeys          Remove authentication keys from the local directory.
    pkg                 Manage packages on remote hosts.
    calamari            Install and configure Calamari nodes. Assumes that a
                        repository with Calamari packages is already
                        configured. Refer to the docs for examples
                        (http://ceph.com/ceph-deploy/docs/conf.html)

See 'ceph-deploy <command> --help' for help on a specific command
[root@compute01 deploy]#  ceph-deploy disk zap compute02 /dev/sdb
[... same output as the first zap ...]
[compute02][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdb>
[root@compute01 deploy]#  ceph-deploy osd create compute02 --data /dev/sdb
[... same options and deploy output as before ...]
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new fab4f939-9a55-4e45-94d5-2434dbb24766
^CKilled by signal 2.
[ceph_deploy][ERROR ] KeyboardInterrupt
[root@compute01 deploy]# ls
ceph.bootstrap-mds.keyring  ceph.bootstrap-osd.keyring  ceph.client.admin.keyring  ceph-deploy-ceph.log
ceph.bootstrap-mgr.keyring  ceph.bootstrap-rgw.keyring  ceph.conf                  ceph.mon.keyring
[root@compute01 deploy]# vim /etc/ceph/ceph.conf
[root@compute01 deploy]# vim ceph.conf
[root@compute01 deploy]# vim ceph.mon.keyring
[root@compute01 deploy]# vim ceph.client.admin.keyring
[root@compute01 deploy]#  ceph-deploy disk zap compute02 /dev/sdb
[... same output as the earlier zaps ...]
[compute02][WARNIN] --> Zapping successful for: <Raw Device: /dev/sdb>
[root@compute01 deploy]# ceph-deploy osd create compute02 --data /dev/sdb
[... same options and deploy output as before ...]
[compute02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new c71415c4-fdaa-4955-8c26-71d160c6121e
[compute02][WARNIN] No data was received after 300 seconds, disconnecting...
[compute02][INFO  ] checking OSD status...
[compute02][DEBUG ] find the location of an executable
[compute02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host compute02 is now ready for osd use.
[root@compute01 deploy]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       2.00000 root default
-5       1.00000     host compute01
 1   hdd 1.00000         osd.1          up  1.00000 1.00000
-3             0     host compute02
-7       1.00000     host compute03
 2   hdd 1.00000         osd.2          up  1.00000 1.00000
 0             0 osd.0                down        0 1.00000
 3             0 osd.3                down        0 1.00000
ceph-deploy reported success, yet osd.0 never joined host compute02: it and the stale osd.3 sit at the bottom of the tree with weight 0, down. On the other node it was worse: even ceph osd tree hung and had to be interrupted:

[root@compute02 ~]# ceph osd tree
^CInterrupted
Traceback (most recent call last):
  File "/usr/bin/ceph", line 1269, in <module>
    retval = main()
  File "/usr/bin/ceph", line 1200, in main
    verbose)
  File "/usr/bin/ceph", line 622, in new_style_command
    ret, outbuf, outs = do_command(parsed_args, target, cmdargs, sigdict, inbuf, verbose)
  File "/usr/bin/ceph", line 596, in do_command
    return ret, '', ''
UnboundLocalError: local variable 'ret' referenced before assignment
[root@compute02 ~]# date
With ceph osd tree unusable there, the only option left was the logs.

The cluster log, /var/log/ceph/ceph.log, turned out to be little help; it just repeats what ceph -s already shows:
[root@compute02 ~]# tail -f /var/log/ceph/ceph.log
2021-08-31 14:13:37.918792 mgr.compute01 (mgr.49274115) 15887 : cluster [DBG] pgmap v8821: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 34 KiB/s rd, 41 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:41.694764 mon.compute01 (mon.0) 717737 : cluster [WRN] Health check update: 528 slow ops, oldest one blocked for 477 sec, mon.compute02 has slow ops (SLOW_OPS)
2021-08-31 14:13:46.695723 mon.compute01 (mon.0) 717740 : cluster [WRN] Health check update: 535 slow ops, oldest one blocked for 482 sec, mon.compute02 has slow ops (SLOW_OPS)
2021-08-31 14:13:39.919835 mgr.compute01 (mgr.49274115) 15888 : cluster [DBG] pgmap v8822: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 34 KiB/s rd, 41 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:41.921160 mgr.compute01 (mgr.49274115) 15892 : cluster [DBG] pgmap v8823: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 34 KiB/s rd, 41 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:43.925789 mgr.compute01 (mgr.49274115) 15893 : cluster [DBG] pgmap v8824: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 34 KiB/s rd, 41 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:45.927393 mgr.compute01 (mgr.49274115) 15894 : cluster [DBG] pgmap v8825: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 7.7 KiB/s rd, 9 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:47.930528 mgr.compute01 (mgr.49274115) 15895 : cluster [DBG] pgmap v8826: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 7.7 KiB/s rd, 9 op/s; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:51.696665 mon.compute01 (mon.0) 717744 : cluster [WRN] Health check update: 544 slow ops, oldest one blocked for 487 sec, mon.compute02 has slow ops (SLOW_OPS)
2021-08-31 14:13:56.697487 mon.compute01 (mon.0) 717766 : cluster [WRN] Health check update: 547 slow ops, oldest one blocked for 492 sec, mon.compute02 has slow ops (SLOW_OPS)
2021-08-31 14:13:49.932275 mgr.compute01 (mgr.49274115) 15899 : cluster [DBG] pgmap v8827: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:51.934115 mgr.compute01 (mgr.49274115) 15900 : cluster [DBG] pgmap v8828: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:53.938702 mgr.compute01 (mgr.49274115) 15901 : cluster [DBG] pgmap v8829: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:55.939684 mgr.compute01 (mgr.49274115) 15905 : cluster [DBG] pgmap v8830: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 13701/41103 objects degraded (33.333%)
2021-08-31 14:13:57.944025 mgr.compute01 (mgr.49274115) 15906 : cluster [DBG] pgmap v8831: 640 pgs: 432 active+undersized+degraded, 208 active+undersized; 11 GiB data, 22 GiB used, 2.7 TiB / 2.7 TiB avail; 13701/41103 objects degraded (33.333%)
2021-08-31 14:14:01.698441 mon.compute01 (mon.0) 717769 : cluster [WRN] Health check update: 554 slow ops, oldest one blocked for 497 sec, mon.compute02 has slow ops (SLOW_OPS)
^C
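Not quite useless, though: every SLOW_OPS line points at mon.compute02, which matches the clock-skew warning in ceph status. The same checks can be read in one place with:

ceph health detail    # expands each active health check, naming the skewed mon and its slow ops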
ceph-osd.0.log, however, pointed straight at the problem: "osd.0 553 unable to obtain rotating service keys; retrying". In other words, the clocks are out of sync and the OSD is stuck retrying:
[root@compute02 ~]# tail -f /var/log/ceph/ceph-osd.
ceph-osd.0.log              ceph-osd.0.log-20210826.gz  ceph-osd.0.log-20210828.gz  ceph-osd.0.log-20210830.gz  ceph-osd.1.log
ceph-osd.0.log-20210825.gz  ceph-osd.0.log-20210827.gz  ceph-osd.0.log-20210829.gz  ceph-osd.0.log-20210831.gz  ceph-osd.1.log-20210721.gz
[root@compute02 ~]# tail -f /var/log/ceph/ceph-osd.0.log
2021-08-31 11:25:00.808 7f80e5e47a80  0 monclient: wait_auth_rotating timed out after 30
2021-08-31 11:25:00.808 7f80e5e47a80 -1 osd.0 553 unable to obtain rotating service keys; retrying
2021-08-31 11:25:30.808 7f80e5e47a80  0 monclient: wait_auth_rotating timed out after 30
2021-08-31 11:25:30.808 7f80e5e47a80 -1 osd.0 553 unable to obtain rotating service keys; retrying
2021-08-31 11:26:00.808 7f80e5e47a80  0 monclient: wait_auth_rotating timed out after 30
2021-08-31 11:26:00.808 7f80e5e47a80 -1 osd.0 553 unable to obtain rotating service keys; retrying
2021-08-31 11:26:30.809 7f80e5e47a80  0 monclient: wait_auth_rotating timed out after 30
2021-08-31 11:26:30.809 7f80e5e47a80 -1 osd.0 553 unable to obtain rotating service keys; retrying
2021-08-31 11:27:00.809 7f80e5e47a80  0 monclient: wait_auth_rotating timed out after 30
2021-08-31 11:27:00.809 7f80e5e47a80 -1 osd.0 553 unable to obtain rotating service keys; retrying
^C

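This message is cephx at work. The monitors hand out time-limited rotating service keys, and a daemon whose clock sits outside the tolerated drift window can never validate a fresh ticket, so wait_auth_rotating times out over and over and the OSD cannot finish starting. The tolerance is tight; it can be inspected on the mon host (the 0.05 s figure is the upstream default, quoted here as an assumption about this cluster's config):

ceph daemon mon.compute02 config get mon_clock_drift_allowed    # run on compute02; defaults to 0.05 s

The 14.9-second offset measured below is hundreds of times past that.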
So, sync the clocks. chrony is installed for ongoing synchronization, but for an immediate one-shot correction ntpdate is quicker:
[root@compute02 ~]# ntpdate -u compute01
31 Aug 14:16:21 ntpdate[6103]: step time server 192.168.0.75 offset -14.901748 sec
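ntpdate steps the clock once: compute02 was 14.9 seconds behind, which explains everything above. To keep the clocks converged afterwards through the chrony that is already installed, something along these lines works (a sketch; the choice of compute01 as the server and the makestep thresholds are assumptions, not this cluster's actual config):

# in /etc/chrony.conf on every node:
#   server compute01 iburst
#   makestep 1.0 3
systemctl restart chronyd
chronyc makestep    # step immediately if the offset is still large
chronyc tracking    # confirm the remaining offset is near zero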
Once the time was stepped, the osd tree command worked again:
[root@compute02 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       2.00000 root default
-5       1.00000     host compute01
 1   hdd 1.00000         osd.1          up  1.00000 1.00000
-3             0     host compute02
-7       1.00000     host compute03
 2   hdd 1.00000         osd.2          up  1.00000 1.00000
 0             0 osd.0                down        0 1.00000
 3             0 osd.3                down        0 1.00000
Now clean out the OSDs stuck in the down state:
[root@compute02 ~]# ceph osd out osd.3
osd.3 is already out.
[root@compute02 ~]# ceph osd rm osd.3
removed osd.3
[root@compute02 ~]# ceph osd crush remove osd.3
device 'osd.3' does not appear in the crush map
[root@compute02 ~]# ceph osd rm osd.3
osd.3 does not exist.
[root@compute02 ~]# ceph auth del osd.3
updated
[root@compute02 ~]# ceph osd in osd.0
marked in osd.0.
[root@compute02 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       2.00000 root default
-5       1.00000     host compute01
 1   hdd 1.00000         osd.1          up  1.00000 1.00000
-3             0     host compute02
-7       1.00000     host compute03
 2   hdd 1.00000         osd.2          up  1.00000 1.00000
 0             0 osd.0                down  1.00000 1.00000
[root@compute02 ~]# systemctl restart ceph-osd@0
Job for ceph-osd@0.service failed because the control process exited with error code. See "systemctl status ceph-osd@0.service" and "journalctl -xe" for details.
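The failed restart is expected at this point: /dev/sdb on compute02 was zapped during the earlier attempts, so there is no prepared OSD data left for the unit to activate. That reading is an inference from this session, and it is easy to confirm on the host:

systemctl status ceph-osd@0 -l    # shows why the control process exited
ceph-volume lvm list              # LVM-backed OSDs present on this host; expected to be empty here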
With the clock fixed, re-add the OSD on the disk:
[root@compute01 deploy]# ceph-deploy osd create compute02 --data /dev/sdb
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (2.0.1): /usr/bin/ceph-deploy osd create compute02 --data /dev/sdb
[... same option listing as before ...]
[ceph_deploy.osd][DEBUG ] Creating OSD on cluster ceph with data device /dev/sdb
[compute02][DEBUG ] connected to host: compute02
[compute02][DEBUG ] detect platform information from remote host
[compute02][DEBUG ] detect machine type
[compute02][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: CentOS Linux 7.9.2009 Core
[ceph_deploy.osd][DEBUG ] Deploying osd to compute02
[compute02][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[compute02][DEBUG ] find the location of an executable
[compute02][INFO  ] Running command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/sdb
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new bbfd988d-bb2e-44f2-a366-17501a8bbada
[compute02][WARNIN] Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada ceph-0f6fa318-810d-4df6-8d52-afeac6f9a463
[compute02][WARNIN]  stdout: Logical volume "osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada" created.
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool --gen-print-key
[compute02][WARNIN] Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
[compute02][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0f6fa318-810d-4df6-8d52-afeac6f9a463/osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
[compute02][WARNIN] Running command: /usr/bin/ln -s /dev/ceph-0f6fa318-810d-4df6-8d52-afeac6f9a463/osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada /var/lib/ceph/osd/ceph-0/block
[compute02][WARNIN] Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
[compute02][WARNIN]  stderr: 2021-08-31 14:21:34.978 7fb58c080700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
[compute02][WARNIN] 2021-08-31 14:21:34.978 7fb58c080700 -1 AuthRegistry(0x7fb584065de8) no keyring found at /etc/ceph/ceph.client.bootstrap-osd.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,, disabling cephx
[compute02][WARNIN]  stderr: got monmap epoch 3
[compute02][WARNIN] Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQBsyi1hTYl7OhAAMzTvAS8eKANTVRi882qXxw==
[compute02][WARNIN]  stdout: creating /var/lib/ceph/osd/ceph-0/keyring
[compute02][WARNIN] added entity osd.0 auth(key=AQBsyi1hTYl7OhAAMzTvAS8eKANTVRi882qXxw==)
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
[compute02][WARNIN] Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid bbfd988d-bb2e-44f2-a366-17501a8bbada --setuser ceph --setgroup ceph
[compute02][WARNIN] --> ceph-volume lvm prepare successful for: /dev/sdb
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[compute02][WARNIN] Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-0f6fa318-810d-4df6-8d52-afeac6f9a463/osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada --path /var/lib/ceph/osd/ceph-0 --no-mon-config
[compute02][WARNIN] Running command: /usr/bin/ln -snf /dev/ceph-0f6fa318-810d-4df6-8d52-afeac6f9a463/osd-block-bbfd988d-bb2e-44f2-a366-17501a8bbada /var/lib/ceph/osd/ceph-0/block
[compute02][WARNIN] Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-0/block
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
[compute02][WARNIN] Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0
[compute02][WARNIN] Running command: /usr/bin/systemctl enable ceph-volume@lvm-0-bbfd988d-bb2e-44f2-a366-17501a8bbada
[compute02][WARNIN]  stderr: Created symlink from /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-0-bbfd988d-bb2e-44f2-a366-17501a8bbada.service to /usr/lib/systemd/system/ceph-volume@.service.
[compute02][WARNIN] Running command: /usr/bin/systemctl enable --runtime ceph-osd@0
[compute02][WARNIN]  stderr: Created symlink from /run/systemd/system/ceph-osd.target.wants/ceph-osd@0.service to /usr/lib/systemd/system/ceph-osd@.service.
[compute02][WARNIN] Running command: /usr/bin/systemctl start ceph-osd@0
[compute02][WARNIN] --> ceph-volume lvm activate successful for osd ID: 0
[compute02][WARNIN] --> ceph-volume lvm create successful for: /dev/sdb
[compute02][INFO  ] checking OSD status...
[compute02][DEBUG ] find the location of an executable
[compute02][INFO  ] Running command: /bin/ceph --cluster=ceph osd stat --format=json
[ceph_deploy.osd][DEBUG ] Host compute02 is now ready for osd use.
Created successfully. This pitfall really was a nasty one: with Ceph, fix time synchronization before anything else, or all kinds of unexpected surprises follow. Notice that the very same osd new step that previously hung for 300 seconds now completes at once, and the rest of the ceph-volume flow (lvcreate, mkfs, systemctl enable and start) runs straight through:
[root@compute01 deploy]# ceph osd status
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
| id |    host   |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | compute02 | 1096M |  557G |    0   |     0   |    0   |     0   | exists,up |
| 1  | compute01 | 12.1G | 2223G |    0   |     0   |    0   |     0   | exists,up |
| 2  | compute03 | 12.1G |  546G |    0   |     0   |    0   |     0   | exists,up |
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
[root@compute01 deploy]# ceph osd status
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
| id |    host   |  used | avail | wr ops | wr data | rd ops | rd data |   state   |
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
| 0  | compute02 | 1096M |  557G |    0   |     0   |    0   |     0   | exists,up |
| 1  | compute01 | 12.1G | 2223G |    0   |     0   |   10   |   327   | exists,up |
| 2  | compute03 | 12.1G |  546G |    0   |     0   |    9   |   266   | exists,up |
+----+-----------+-------+-------+--------+---------+--------+---------+-----------+
Follow-up from the original poster, 2021-8-31 15:23:15:
[root@compute01 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       2.00000 root default
-5       1.00000     host compute01
 1   hdd 1.00000         osd.1          up  1.00000 1.00000
-3             0     host compute02
-7       1.00000     host compute03
 2   hdd 1.00000         osd.2          up  1.00000 1.00000
 0   hdd       0 osd.0                  up  1.00000 1.00000
osd.0 is up again but carries crush weight 0 and sits outside the compute02 host bucket, so place it back explicitly:

[root@compute01 ~]# ceph osd crush set osd.0 1.00000 host=compute02
set item id 0 name 'osd.0' weight 1 at location {host=compute02} to crush map
[root@compute01 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME          STATUS REWEIGHT PRI-AFF
-1       3.00000 root default
-5       1.00000     host compute01
 1   hdd 1.00000         osd.1          up  1.00000 1.00000
-3       1.00000     host compute02
 0   hdd 1.00000         osd.0          up  1.00000 1.00000
-7       1.00000     host compute03
 2   hdd 1.00000         osd.2          up  1.00000 1.00000
[root@compute01 ~]#
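For an OSD that already sits in the correct host bucket, adjusting the weight alone would have been enough; the standard one-line alternative, for reference:

ceph osd crush reweight osd.0 1.0    # change the crush weight of an existing crush item in place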