RAID5 + LVM notes
1. Prepare the disks: build the RAID array first, then layer LVM on top.
/dev/sdb /dev/sdc /dev/sdd /dev/sde
[root@localhost ~]# mdadm -C -v /dev/md5 -l 5 -n 3 /dev/sdb /dev/sdc /dev/sdd -x1 /dev/sde
mdadm: layout defaults to left-symmetric
mdadm: layout defaults to left-symmetric
mdadm: chunk size defaults to 512K
mdadm: /dev/sdc appears to be part of a raid array:
level=raid5 devices=3 ctime=Mon Apr 26 09:14:34 2021
mdadm: /dev/sdd appears to be part of a raid array:
level=raid5 devices=3 ctime=Mon Apr 26 09:14:34 2021
mdadm: /dev/sde appears to be part of a raid array:
level=raid5 devices=3 ctime=Mon Apr 26 09:14:34 2021
mdadm: size set to 20954112K
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md5 started.
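A commonly recommended extra step (not part of the original session) is to record the new array in the mdadm configuration file so it is assembled under the same name after a reboot:
mdadm --detail --scan >> /etc/mdadm.conf    # persist the array definition for boot-time assembly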
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdc 8:32 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdd 8:48 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sde 8:64 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdf 8:80 0 20G 0 disk
sdg 8:96 0 20G 0 disk
sdh 8:112 0 20G 0 disk
sdi 8:128 0 20G 0 disk
sdj 8:144 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
[root@localhost ~]# mdadm -D /dev/md5
/dev/md5:
Version : 1.2
Creation Time : Mon Apr 26 09:46:35 2021
Raid Level : raid5
Array Size : 41908224 (39.97 GiB 42.91 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 3
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Mon Apr 26 09:47:27 2021
State : clean, degraded, recovering
Active Devices : 2
Working Devices : 4
Failed Devices : 0
Spare Devices : 2
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Rebuild Status : 51% complete
Name : localhost.localdomain:5 (local to host localhost.localdomain)
UUID : 92dfb562:ea25fecb:52439bb4:b2621bea
Events : 9
Number Major Minor RaidDevice State
0 8 16 0 active sync /dev/sdb
1 8 32 1 active sync /dev/sdc
4 8 48 2 spare rebuilding /dev/sdd
3 8 64 - spare /dev/sde
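Besides mdadm -D, the rebuild progress can be watched from /proc/mdstat (a side note, assuming the /dev/md5 array created above):
cat /proc/mdstat              # shows the recovery progress bar while the spare is rebuilding
watch -n 5 cat /proc/mdstat   # refresh every 5 seconds until the resync completes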
Once the resync has finished, build the LVM.
[root@localhost ~]# mdadm -D /dev/md5
/dev/md5:
Version : 1.2
Creation Time : Mon Apr 26 09:46:35 2021
Raid Level : raid5
Array Size : 41908224 (39.97 GiB 42.91 GB)
Used Dev Size : 20954112 (19.98 GiB 21.46 GB)
Raid Devices : 3
Total Devices : 4
Persistence : Superblock is persistent
Update Time : Mon Apr 26 09:48:21 2021
State : clean
Active Devices : 3
Working Devices : 4
Failed Devices : 0
Spare Devices : 1
Layout : left-symmetric
Chunk Size : 512K
Consistency Policy : resync
Name : localhost.localdomain:5 (local to host localhost.localdomain)
UUID : 92dfb562:ea25fecb:52439bb4:b2621bea
Events : 18
Number Major Minor RaidDevice State
0 8 16 0 active sync /dev/sdb
1 8 32 1 active sync /dev/sdc
4 8 48 2 active sync /dev/sdd
3 8 64 - spare /dev/sde
Create the PV
[root@localhost ~]# pvcreate -ff /dev/md5
Really INITIALIZE physical volume "/dev/md5" of volume group "datavg2" [y/n]? y
WARNING: Forcing physical volume creation on /dev/md5 of volume group "datavg2"
Wiping xfs signature on /dev/md5.
Physical volume "/dev/md5" successfully created.
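The -ff force flag (and the prompt mentioning volume group "datavg2") only appear because /dev/md5 still carried LVM and XFS signatures from an earlier experiment; on a freshly created array a plain pvcreate /dev/md5 is enough. One way to list leftover signatures before wiping them (not from the original session):
wipefs /dev/md5    # list any filesystem/LVM signatures still present on the device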
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/md5 lvm2 --- <39.97g <39.97g
/dev/sda2 centos lvm2 a-- <39.00g 4.00m
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdc 8:32 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdd 8:48 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sde 8:64 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
sdf 8:80 0 20G 0 disk
sdg 8:96 0 20G 0 disk
sdh 8:112 0 20G 0 disk
sdi 8:128 0 20G 0 disk
sdj 8:144 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
Create the VG
[root@localhost ~]# vgcreate vg2 /dev/md5
Volume group "vg2" successfully created
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
centos 1 2 0 wz--n- <39.00g 4.00m
vg2 1 0 0 wz--n- 39.96g 39.96g
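vgcreate defaults to 4 MiB physical extents, which is why the <39.97g PV shows up as 39.96g of usable space in the VG (the size is rounded down to whole extents). The extent size and count can be checked with:
vgdisplay vg2    # see "PE Size" and "Total PE"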
Create the LV
[root@localhost ~]# lvcreate -L 30G -n lvs3 vg2
Logical volume "lvs3" created.
[root@localhost ~]# lvdisplay /dev/vg2/lvs3
--- Logical volume ---
LV Path /dev/vg2/lvs3
LV Name lvs3
VG Name vg2
LV UUID Cp9NBj-c1sq-wME3-DXQt-L6Zw-0EUj-53iCGo
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2021-04-26 09:58:20 +0800
LV Status available
# open 0
LV Size 30.00 GiB
Current LE 7680
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
[root@localhost ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 40G 0 disk
├─sda1 8:1 0 1G 0 part
└─sda2 8:2 0 39G 0 part
├─centos-root 253:0 0 37G 0 lvm /
└─centos-swap 253:1 0 2G 0 lvm [SWAP]
sdb 8:16 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
└─vg2-lvs3 253:2 0 30G 0 lvm
sdc 8:32 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
└─vg2-lvs3 253:2 0 30G 0 lvm
sdd 8:48 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
└─vg2-lvs3 253:2 0 30G 0 lvm
sde 8:64 0 20G 0 disk
└─md5 9:5 0 40G 0 raid5
└─vg2-lvs3 253:2 0 30G 0 lvm
sdf 8:80 0 20G 0 disk
sdg 8:96 0 20G 0 disk
sdh 8:112 0 20G 0 disk
sdi 8:128 0 20G 0 disk
sdj 8:144 0 20G 0 disk
sr0 11:0 1 4.4G 0 rom
Format and mount
[root@localhost ~]# mkfs.xfs /dev/vg2/lvs3
meta-data=/dev/vg2/lvs3 isize=512 agcount=16, agsize=491392 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=7862272, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=3840, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /datatest
[root@localhost ~]# mount /dev/vg2/lvs3 /datatest/
[root@localhost ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 16G 22G 43% /
/dev/mapper/vg2-lvs3 30G 33M 30G 1% /datatest
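This mount does not survive a reboot. To make it persistent, one would normally add an /etc/fstab entry for the logical volume (a sketch, assuming the same device and mountpoint as above):
echo '/dev/vg2/lvs3  /datatest  xfs  defaults  0 0' >> /etc/fstab
mount -a    # verify that the fstab entry mounts cleanly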
Write something into the new filesystem:
[root@localhost datatest]# echo a> 1
[root@localhost datatest]# cat *
a
Extend the LV a little:
[root@localhost datatest]# lvextend -L +1G /dev/vg2/lvs3
Size of logical volume vg2/lvs3 changed from 30.00 GiB (7680 extents) to 31.00 GiB (7936 extents).
Logical volume vg2/lvs3 successfully resized.
###[root@localhost datatest]# resize2fs /dev/vg2/lvs3 ## for an ext4 filesystem, run resize2fs instead
[root@localhost datatest]# xfs_growfs /dev/vg2/lvs3 ### for XFS, grow the filesystem with xfs_growfs
meta-data=/dev/mapper/vg2-lvs3 isize=512 agcount=16, agsize=491392 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=7862272, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=3840, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 7862272 to 8126464
[root@localhost datatest]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 16G 22G 43% /
/dev/mapper/vg2-lvs3 31G 33M 31G 1% /datatest
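The two-step lvextend + xfs_growfs can also be collapsed into one command: lvextend with -r (--resizefs) calls the matching filesystem resize tool automatically. An equivalent one-liner for the step above (not from the original session):
lvextend -r -L +1G /dev/vg2/lvs3    # grows the LV and the XFS filesystem in one step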
Extend the LV using a percentage of the free space:
[root@localhost datatest]# lvextend -l +100%FREE /dev/vg2/lvs3
Size of logical volume vg2/lvs3 changed from 31.00 GiB (7936 extents) to 39.96 GiB (10231 extents).
Logical volume vg2/lvs3 successfully resized.
[root@localhost datatest]# xfs_growfs /dev/vg2/lvs3
meta-data=/dev/mapper/vg2-lvs3 isize=512 agcount=17, agsize=491392 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=8126464, imaxpct=25
= sunit=128 swidth=256 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=3840, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 8126464 to 10476544
[root@localhost datatest]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 16G 22G 43% /
/dev/mapper/vg2-lvs3 40G 34M 40G 1% /datatest
#########
Remove the logical volume (unmount it first):
umount /dev/datavg/lv1
lvremove /dev/datavg/lv1
Remove the VG:
vgremove datavg
Remove the PVs:
pvremove /dev/sdb
pvremove /dev/sdc
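For the RAID-backed setup built earlier in this note, the teardown continues below the LVM layer: stop the md array and clear the member superblocks so the disks can be reused (a sketch using the device names from above):
umount /datatest
lvremove /dev/vg2/lvs3
vgremove vg2
pvremove /dev/md5
mdadm --stop /dev/md5
mdadm --zero-superblock /dev/sdb /dev/sdc /dev/sdd /dev/sde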
######### Extend a volume group by adding a disk ######
[root@~]# pvcreate /dev/sdc
[root@~]# vgs
VG #PV #LV #SN Attr VSize VFree
datavg 1 1 0 wz--n- 1020.00m 920.00m
Extend:
[root@~]# vgextend datavg /dev/sdc
Volume group "datavg" successfully extended
Check:
[root@~]# vgs
VG #PV #LV #SN Attr VSize VFree
datavg 2 1 0 wz--n- 1.99g 1.89g
########## Online data migration (pvmove) ########
mkfs.xfs -f /dev/sdi
pvcreate /dev/sdi
vgextend datavg /dev/sdi
pvmove /dev/sde /dev/sdi
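Once pvmove has drained all extents off /dev/sde, the usual follow-up (not shown in the original note) is to drop the now-empty PV from the volume group and release its LVM label:
vgreduce datavg /dev/sde    # remove the empty PV from the VG
pvremove /dev/sde           # wipe the LVM label from the disk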
Tags: disk, dev, raid5, vg2, 20G, lvm, notes, root, localhost  Source: https://www.cnblogs.com/chen-ling/p/14703506.html