
OpenStack in Practice 3: Migrating from CloudStack to OpenStack


1. Upgrading the Ceph cluster's OSD network (cluster network)

1.1 Production environment

OS: Ubuntu 14.04

Ceph version: Jewel (10.2.11)

Deployment method: deployed with ceph-deploy.

5 monitor nodes, 11 OSD nodes

1.2 OSD network upgrade

Edit /etc/ceph/ceph.conf on all Ceph nodes (mon/osd/client):

vim /etc/ceph/ceph.conf
[global]
......
public network = 10.78.0.0/16
cluster network = 10.100.4.0/24
......

Restart all Ceph services on the mon and osd nodes (upstart on Ubuntu 14.04):

restart  ceph-all
# or
restart ceph-mon-all
restart ceph-osd-all
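To confirm that the restarted daemons picked up the new networks, the running configuration can be queried through an OSD's admin socket; a minimal check, run on one of the OSD hosts (osd.0 is just an example):

ceph daemon osd.0 config show | grep -E 'cluster_network|public_network'
ceph -s    # cluster health should return to HEALTH_OK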

2. Deploying OpenStack against the external production Ceph cluster

2.1 OpenStack system environment

OS: CentOS 7.4

OpenStack version: Queens

Deployment method: Kolla, with external Ceph

Note: the OpenStack controller and compute nodes act as Ceph clients.

2.2 Configure yum repositories (controller/compute nodes)

cd /etc/yum.repos.d/
rm  -rf * 
 
vim CentOS-Base.repo
[base]
name=CentOS-$releasever - Base
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/os/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
 
#released updates
[updates]
name=CentOS-$releasever - Updates
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/updates/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
 
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/extras/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
 
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos/$releasever/centosplus/$basearch/
#mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
 
vim epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
 
[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch/debug
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
 
[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/SRPMS
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1
 
vim  ceph.repo 
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
enabled=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
enabled=1
[ceph-source]
name=cephsource
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
enabled=1
[ceph-radosgw]
name=cephradosgw
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
enabled=1
 
yum clean all 
yum makecache fast  

2.3 Install the Ceph client

# On the ceph-deploy node
vim /etc/hosts
# controller
10.78.0.11 controller1
10.78.0.12 controller2 
10.78.0.13 controller3
# compute
10.78.0.14 compute01
.....
ssh-copy-id controller1
ssh-copy-id controller2 
ssh-copy-id controller3 
 
ssh-copy-id compute01
 
ceph-deploy install controller1 controller2  controller3 compute01
ceph-deploy admin controller1 controller2  controller3  compute01 
# Verify
ceph  -s

  

2.4 Deploy the OpenStack cluster (3 controllers, 1 compute)

2.4.1 Environment preparation

# On all nodes: disable the firewall and SELinux
systemctl stop firewalld.service
systemctl disable firewalld.service

vim /etc/selinux/config
SELINUX=disabled

# Set the hostname
hostnamectl set-hostname $HOSTNAME

# Disable NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager
reboot

2.4.2 Deployment host setup

# pip setup
yum install  python2-pip
 
cat <<EOF> /etc/pip.conf
[global]
index-url = http://mirrors.aliyun.com/pypi/simple/
[install]
trusted-host=mirrors.aliyun.com
EOF
pip install -U pip
yum install -y python-devel libffi-devel gcc openssl-devel libselinux-python git
pip install -U 'ansible>=2.2.0'
 
# docker-ce
yum remove docker docker-common docker-selinux docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
 
yum clean all 
yum makecache fast  
 
yum install docker-ce
# Configure a Docker registry mirror
mkdir -p /etc/docker/
cat << EOF>/etc/docker/daemon.json
{
  "registry-mirrors": ["https://iby0an85.mirror.aliyuncs.com"]
}
EOF
 
systemctl daemon-reload
systemctl start docker
 
# Set up a local Docker registry (the images have already been built)
docker run -d -v /opt/registry/:/var/lib/registry/ -p 4000:5000 --name=registry registry
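A quick way to check that the local registry is up (a sketch, assuming it listens on port 4000 as started above); if the Kolla images are to be pulled from this registry, globals.yml must also point at it:

curl http://localhost:4000/v2/_catalog          # lists the repositories held by the registry
# in /etc/kolla/globals.yml (the address is a placeholder for the deployment host):
# docker_registry: "<deployment-host-ip>:4000"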

2.4.3 Install kolla-ansible

# kolla-ansible has already been uploaded to /root/ on the deployment host
cd kolla-ansible
pip install -r test-requirements.txt -r requirements.txt
python setup.py install 
cp -rv ./etc/kolla/  /etc/
mkdir /etc/kolla/config  
kolla-genpwd
 
vim /etc/kolla/passwords.yml 
keystone_admin_password: otvcloud 

2.4.4 Create pools

ceph osd pool create volumes 128
ceph osd pool set volumes size 3
ceph osd pool create vms 128
ceph osd pool set vms size 3
ceph osd pool create images 64
ceph osd pool set images size 3
ceph osd pool create backups 64
ceph osd pool set backups size 3
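To confirm the pools and their replica counts before deploying (run on any Ceph admin node):

ceph osd pool ls detail    # lists each pool with its size (replica count) and pg_num
ceph df                    # shows per-pool capacity and usage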

2.4.5 Enable external Ceph

vim /etc/kolla/globals.yml
enable_ceph: "no"
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
cinder_backup_driver: "ceph"
nova_backend_ceph: "yes"

2.4.6 Configure the RBD backend for Glance

mkdir -p /etc/kolla/config/{glance,cinder/{cinder-volume,cinder-backup},nova}
 
vim /etc/kolla/config/glance/glance-api.conf 
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
 
# Copy the Ceph cluster config (/etc/ceph/ceph.conf) to /etc/kolla/config/glance/ceph.conf
cp /etc/ceph/ceph.conf /etc/kolla/config/glance/ceph.conf  
 
# Generate the ceph.client.glance.keyring file and save it to /etc/kolla/config/glance
ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o ceph.client.glance.keyring 
 
cp ceph.client.glance.keyring  /etc/kolla/config/glance/

2.4.7 Configure the RBD backend for Cinder

vim /etc/kolla/config/cinder/cinder-volume.conf
[DEFAULT]
enabled_backends=rbd-1
 
[rbd-1]
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
backend_host = rbd:volumes
rbd_pool = volumes
volume_backend_name = rbd-1
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
 
 
vim /etc/kolla/config/cinder/cinder-backup.conf
[DEFAULT]
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = backups
backup_driver = cinder.backup.drivers.ceph
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
 
# Copy the Ceph config (/etc/ceph/ceph.conf) to /etc/kolla/config/cinder/ceph.conf
cp /etc/ceph/ceph.conf /etc/kolla/config/cinder

# Generate the ceph.client.cinder.keyring file
ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o ceph.client.cinder.keyring

# Generate the ceph.client.cinder-backup.keyring file
ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' -o ceph.client.cinder-backup.keyring
# Copy ceph.client.cinder-backup.keyring and ceph.client.cinder.keyring to /etc/kolla/config/cinder/cinder-backup/
cp ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup/
cp ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup/
# Copy ceph.client.cinder.keyring to /etc/kolla/config/cinder/cinder-volume/
cp ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume/

# Note: cinder-backup needs both keyrings to reach the volumes and backups pools

2.4.8 Configure the RBD backend for Nova

vim /etc/kolla/config/nova/nova-compute.conf
[libvirt]
images_rbd_pool = vms
images_type = rbd
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
 
# Generate the ceph.client.nova.keyring file
ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms' -o ceph.client.nova.keyring

# Copy ceph.conf, ceph.client.nova.keyring and ceph.client.cinder.keyring to /etc/kolla/config/nova
cp /etc/ceph/ceph.conf   /etc/kolla/config/nova
cp ceph.client.nova.keyring /etc/kolla/config/nova/
cp ceph.client.cinder.keyring /etc/kolla/config/nova
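After sections 2.4.6 through 2.4.8 the Kolla configuration overlay should contain the files below (a summary of the copy steps above, nothing extra is required):

/etc/kolla/config/
├── glance/
│   ├── glance-api.conf
│   ├── ceph.conf
│   └── ceph.client.glance.keyring
├── cinder/
│   ├── ceph.conf
│   ├── cinder-volume.conf
│   ├── cinder-backup.conf
│   ├── cinder-volume/
│   │   └── ceph.client.cinder.keyring
│   └── cinder-backup/
│       ├── ceph.client.cinder.keyring
│       └── ceph.client.cinder-backup.keyring
└── nova/
    ├── ceph.conf
    ├── nova-compute.conf
    ├── ceph.client.nova.keyring
    └── ceph.client.cinder.keyring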

2.4.9 Edit the deployment configuration files

vim /etc/kolla/globals.yml
# copy the prepared globals.yml over after connecting to the VPN

cp /root/kolla-ansible/ansible/inventory/multinode /root/

vim multinode
# adjust to the actual environment (copying the prepared file is enough)
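The exact values depend on the environment, but the settings that normally have to be adjusted in /etc/kolla/globals.yml and in the multinode inventory look roughly like this (the VIP address, interface names and host groups below are assumptions for illustration):

# /etc/kolla/globals.yml (excerpt)
kolla_base_distro: "centos"
kolla_install_type: "source"
openstack_release: "queens"
kolla_internal_vip_address: "10.78.0.10"   # an unused IP in the management network (assumption)
network_interface: "eth0"                  # management interface (assumption)
neutron_external_interface: "eth1"         # provider/external network interface (assumption)

# /root/multinode (excerpt)
[control]
controller1
controller2
controller3

[network]
controller1
controller2
controller3

[compute]
compute01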

2.4.10 Deploy OpenStack

When Kolla uses an external Ceph cluster there are no dedicated storage nodes, but by default cinder-volume and cinder-backup run on storage nodes, so with external Ceph you must explicitly designate hosts to run the cinder-volume and cinder-backup containers.

vim multinode 
[storage]
compute01 
 
kolla-ansible -i /root/multinode bootstrap-servers
kolla-ansible -i /root/multinode prechecks
kolla-ansible -i /root/multinode deploy  
 
kolla-ansible -i /root/multinode post-deploy
 
cp /etc/kolla/admin-openrc.sh  /root/
 
source admin-openrc.sh 
cd /root/kolla-ansible/tools 
vim init-runonce
EXT_NET_CIDR='10.0.2.0/24'                       # external network
EXT_NET_RANGE='start=10.0.2.150,end=10.0.2.199'  # floating IP range
EXT_NET_GATEWAY='10.0.2.1'                       # external network gateway
 
/bin/bash init-runonce
openstack server create ...
openstack  network list 
openstack network agent list 
openstack compute service list 
openstack image list 
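For the elided server create above, a minimal sketch using the demo resources that init-runonce registers (the image, flavor, key and network names below are the init-runonce defaults and may differ if the script was modified):

openstack server create demo1 \
  --image cirros \
  --flavor m1.tiny \
  --key-name mykey \
  --network demo-net
openstack server list    # the instance should reach ACTIVE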

References:

https://blog.csdn.net/dylloveyou/article/details/79114741

https://blog.csdn.net/dylloveyou/article/details/79054120

https://blog.csdn.net/jfengamarsoft/article/details/77683930

2.5 Migrating CloudStack KVM virtual machines stored on Ceph to OpenStack

# Approach: 1. migrate each KVM virtual machine's root disk
#           2. migrate each KVM virtual machine's data disk
# Copy every VM's libvirt XML configuration file to controller node 1
Running /bin/bash -x <script> $hostname volumes (the script name was omitted in the original; $1 is the source host directory, $2 the target Ceph pool) then completes the VM migration.
# Test result: migrated CentOS VMs can be pinged normally, but Debian VMs cannot.
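For reference, the grep/awk parsing in the script below assumes each VM's libvirt XML describes its RBD disks roughly as follows (the pool name and volume UUID here are made-up examples):

<disk type='network' device='disk'>
  <driver name='qemu' type='raw' cache='none'/>
  <source protocol='rbd' name='cloudstack-pool/b94d597b-8d52-4ec5-9a71-dcb5d2fc1b4a'/>
  <target dev='vda' bus='virtio'/>
</disk>

grep rbd picks out the <source> lines; the first match (head -1) is treated as the root disk and the last (tail -1) as the data disk, and the awk field splits extract the pool ('cloudstack-pool') and the image name (the UUID).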
 
#!/bin/bash 
# auth:gxw 
hostname=$1
rm -rf /data/hosts/$hostname/backup* 
 
vm_config_list=(`ls /data/hosts/$hostname`)
new_pool=$2
 
for vm_config  in ${vm_config_list[@]}
 do
   vm_name=`echo $vm_config | cut -d '.' -f 1`
   echo $vm_name 
   #filter vm-rbd
   vm_rbd=`grep  rbd /data/hosts/$hostname/$vm_config |  awk '{print $3}' | awk -F '=|/' '{print $3}'| awk -F"'" '{print $1}'| head -1`
 
   vm_rbd_pool=`grep  rbd /data/hosts/$hostname/$vm_config |  awk '{print $3}'  | awk -F "='|/" '{print $2}' | head -1`
   old_pool=$vm_rbd_pool 
   
   vm_rbd_size=`rbd info $old_pool/$vm_rbd | head -2 | tail -1 | awk '{print $2}'`
   vm_rbd_unit=`rbd info $old_pool/$vm_rbd | head -2 | tail -1 | awk '{print $3}'`
   echo $vm_rbd_unit 
   if [ "$vm_rbd_unit"x = "GB"x ]
     then 
 vm_rbd_size_GB=$vm_rbd_size
   else 
        if [ $vm_rbd_size -le 1024 ]
           then 
               #echo "$hostname-$vm_rbd size :$vm_rbd_size less than 1024MB,can't create boot volume! Please  change to another method!"
               #echo "$hostname-$vm_name" >> /root/special_vm
               vm_rbd_size_GB=1  
        else
         vm_rbd_size_GB=`echo $vm_rbd_size/1024 | bc`
        fi 
   fi 
   #echo $vm_rbd $vm_rdb_size_MB $vm_rbd_size 
 
   #export vm_rbd
   backup_vm_rbd=/data/hosts/$hostname/backup.$vm_rbd
 
   rbd export -p $old_pool $vm_rbd $backup_vm_rbd
   
   #create boot start disk
   new_vm_rbd=$hostname-$vm_rbd 
   openstack volume create $new_vm_rbd --size  $vm_rbd_size_GB  --bootable 
   vm_rbd_boot_uuid=`openstack volume list | grep $new_vm_rbd | awk '{print $2}'`
   echo $vm_rbd_boot_uuid 
   rbd rm -p $new_pool volume-$vm_rbd_boot_uuid 
   
   # import vm_rbd
   rbd import -p $new_pool  $backup_vm_rbd volume-$vm_rbd_boot_uuid 
   rm -rf /data/hosts/$hostname/backup* 
 
   #create flavor
   vm_memory_KB=`grep "memory unit" /data/hosts/$hostname/$vm_config | awk -F '>|<' '{print $3}'`
   vm_memory_MB=`echo $vm_memory_KB/1024 | bc`
   vm_vcpus=`grep "vcpu" /data/hosts/$hostname/$vm_config |  tail -1 | awk -F '>|<' '{print $3}'`
   vm_flavor_id=$vm_rbd
   new_vm_flavor_id=$hostname-$vm_flavor_id 
   openstack flavor delete $new_vm_flavor_id 
   #openstack flavor create --id $new_vm_flavor_id   --ram $vm_memory_MB  --vcpus $vm_vcpus --disk $vm_rbd_size $new_vm_flavor_id 
   openstack flavor create --id $new_vm_flavor_id   --ram $vm_memory_MB  --vcpus $vm_vcpus --disk $vm_rbd_size_GB $new_vm_flavor_id 
 
   #create vm 
   new_vm_name=$hostname-$vm_name
   openstack server delete $new_vm_name 
   openstack server create $new_vm_name --volume $vm_rbd_boot_uuid --flavor $new_vm_flavor_id  --security-group 40f3bf48-2889-4be2-b763-e823ba13a652  --nic net-id=eb68f477-8bb1-42cc-b3d5-f89775fed16e
   
  #create data disk 
   data_rbd=`grep rbd /data/hosts/$hostname/$vm_config |  awk '{print $3}' | awk -F '=|/' '{print $3}'| awk -F"'" '{print $1}'| tail -1` 
   echo $data_rbd
   if [ "$data_rbd"x="$vm_rbd"x ]
      then  
           echo "$new_vm_name have not data  disk!"
   else
     data_rbd_pool=`grep  rbd /data/hosts/$hostname/$vm_config |  awk '{print $3}'  | awk -F "='|/" '{print $2}' | tail -1`
     old_pool=$data_rbd_pool 
 
     data_rbd_size=`rbd info $old_pool/$data_rbd | head -2 | tail -1 | awk '{print $2'}`
     data_rbd_unit=`rbd info $old_pool/$vm_rbd | head -2 | tail -1 | awk '{print $3}'`
   
        #echo $data_rbd_unit
       if [ "$data_rbd_unit"x = "GB"x ]
          then 
            data_rbd_size_GB=$data_rbd_size
       else
          if [ $data_rbd_size -le 1024 ]
            then 
               data_rbd_size_GB=1
         else
              data_rbd_size_GB=`echo $data_rbd_size/1024 | bc`
         fi 
       fi
     
     #export  data_rbd
     backup_data_rbd=/data/hosts/$hostname/backup.$data_rbd
     rbd export -p $old_pool $data_rbd $backup_data_rbd
     #create data disk
     new_data_rbd=$hostname-$data_rbd
     openstack volume create $new_data_rbd  --size $data_rbd_size_GB
     data_rbd_uuid=` openstack volume list | grep $new_data_rbd | awk '{print $2}'`
     rbd rm -p $new_pool volume-$data_rbd_uuid 
 
   # import data_rbd
   rbd import -p $new_pool $backup_data_rbd  volume-$data_rbd_uuid
   rm -rf /data/hosts/$hostname/backup*
   
   # attach data_rbd to vm_rbd
    openstack server add volume $new_vm_name  $data_rbd_uuid 
 fi 
   # attach a floating IP to the virtual server
   openstack floating ip create public1 
   floating_ip=$(openstack floating ip list  | grep None  | head -1 | awk '{print $4}')
   openstack server add floating ip $new_vm_name $floating_ip 
   if [ $? -eq 0 ]
     then 
        rm -rf /data/hosts/$hostname/$vm_config 
   fi 
done  
# http://www.yangguanjun.com/2016/08/03/ceph-rbd-migration/
# http://fishcried.com/2016-03-09/dive-into-rbd/
# https://docs.openstack.org/install-guide/launch-instance-selfservice.html

  

2.5.1 Resetting a forgotten root password on CentOS 7

1) Reboot the system, stop at the GRUB menu and press e to edit the boot entry.

2) Move the cursor to the end of the line containing LANG=en_US.UTF-8, add a space, then append init=/bin/sh (all on the same line).

3) Press Ctrl+X to boot; you will land at a shell prompt:

sh-4.2#

4) Run the following commands:

  mount -o remount,rw /
  passwd root
  # if SELinux is enabled, also run:
  touch /.autorelabel
  exec /sbin/init    # or: exec /sbin/reboot

Reference:

https://blog.csdn.net/wcy00q/article/details/70570043

2.6 Adding compute nodes

1. Prepare the environment (section 2.4.1)
2. Configure the yum repositories (section 2.2)
3. Install the Ceph client (section 2.3)
4. Edit /root/multinode, add the new compute node(s), then redeploy:
kolla-ansible -i /root/multinode bootstrap-servers
kolla-ansible -i /root/multinode prechecks
kolla-ansible -i /root/multinode deploy  
 
openstack compute service list | grep nova-compute 

  

3. Changing the Ceph journal disk location

3.1 Partition planning and creation

In production each OSD node has four newly added 480 GB SSDs, and we want to move the Ceph journals onto these SSDs.

The four SSDs therefore need to be partitioned; how large should each partition be?

The Ceph book recommends around 10 GB per journal when the journal lives on an SSD.

Here we use 50 GB per partition. Create the partitions with the commands below and give every partition a label; labels must not repeat.

parted /dev/sdo -s mklabel gpt mkpart journal_8 0 50G \
-s mkpart journal_9 50G 100G \
-s mkpart journal_10 100G 150G \
-s mkpart journal_11 150G 200G \
-s mkpart journal_12 200G 250G \
-s mkpart journal_13 250G 300G \
-s mkpart journal_14 300G 350G \
-s mkpart journal_15 350G 400G \
-s mkpart journal_16 400G 450G 
 
parted /dev/sdp -s mklabel gpt mkpart journal_17 0 50G \
-s mkpart journal_18 50G 100G \
-s mkpart journal_19 100G 150G \
-s mkpart journal_20 150G 200G \
-s mkpart journal_21 200G 250G \
-s mkpart journal_22 250G 300G \
-s mkpart journal_23 300G 350G \
-s mkpart journal_24 350G 400G \
-s mkpart journal_25 400G 450G 
 
 
parted /dev/sdq -s mklabel gpt mkpart journal_26 0 50G \
-s mkpart journal_27 50G 100G \
-s mkpart journal_28 100G 150G \
-s mkpart journal_29 150G 200G \
-s mkpart journal_30 200G 250G \
-s mkpart journal_31 250G 300G \
-s mkpart journal_32 300G 350G \
-s mkpart journal_33 350G 400G \
-s mkpart journal_34 400G 450G 
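To verify the new partitions and their labels (one device shown as an example):

parted /dev/sdo print    # lists the GPT partitions with their journal_* names
lsblk /dev/sdo           # confirms the 50G partition sizes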

3.2 Migrating the journals to the SSD partitions

#!/bin/bash
#date:2018-2-15 15:53
# author:gxw 
#email: hangtiangazi@163.com
set -e 
/usr/bin/ceph osd set noout 
 
PARTUUIDDIR=/dev/disk/by-partuuid
OSDS=($(lsblk | grep ceph | awk -F'/|-' '{print $NF}'))
 
Journal_Devices=(sdn1 sdn2 sdn3 sdn4 sdn5 sdn6 sdn7 sdn8 sdo1 sdo3 sdo4 sdo5 sdo6 sdo7 sdo8 sdo9 sdp1 sdp2 sdp3 sdp4 sdp5 sdp6 sdp7 sdp8 sdp9 sdq1 sdq2)
 
for i in {0..26}
#for i in 0
do
  Journal_Device=${Journal_Devices[$i]}
  OSD_ID=${OSDS[$i]}
 
  # Query the journal partition's /dev/disk/by-partuuid name
  UUID=$(ls -l $PARTUUIDDIR  | grep $Journal_Device | awk '{print $9}')
  OSD_Journal=/var/lib/ceph/osd/ceph-$OSD_ID/journal
  stop ceph-osd id=$OSD_ID
  ceph-osd -i $OSD_ID  --flush-journal
 
  rm -rf $OSD_Journal
  ln -s $PARTUUIDDIR/$UUID $OSD_Journal
  chown ceph:ceph $OSD_Journal
  echo $UUID  > /var/lib/ceph/osd/ceph-$OSD_ID/journal_uuid
  ceph-osd -i $OSD_ID --mkjournal
  start ceph-osd id=$OSD_ID
done
ceph osd unset noout 
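Once the loop finishes it is worth checking that every OSD came back and that its journal now points at the SSD partition; a minimal check for a single OSD (osd.0 as an example):

ls -l /var/lib/ceph/osd/ceph-0/journal    # should be a symlink into /dev/disk/by-partuuid/
status ceph-osd id=0                      # upstart job status on Ubuntu 14.04
ceph -s                                   # health should return to HEALTH_OK after noout is cleared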

  

Source: https://www.cnblogs.com/jonc/p/11107429.html