Basic Disk Management
Compiled, organized, and written by snow chuai --- 2020/2/13
Last updated --- 2020/08/30
1. Disk Quotas
1.1 Implementing Disk Quotas on ext4
1.1.1 Implementing Disk Quotas on ext4 - Preparation
1) This lab requires one disk as a test disk; the system disk can also be used (not recommended for the lab)
2) The target device for this lab is vda1, with an ext4 filesystem
3) Install quota
[root@localhost ~]# yum install quota -y
4) Enable quota - temporary (effective for the current mount only)
[root@localhost ~]# mkdir /mnt/files
# Enable user and group quotas
[root@localhost ~]# mount -o usrquota,grpquota /dev/vda1 /mnt/files
# Confirm the usrquota option has been applied
[root@localhost ~]# mount | grep /mnt/files
/dev/vda1 on /mnt/files type ext4 (rw,relatime,quota,usrquota,grpquota,data=ordered)
# Change the owner and group to snow
[root@localhost ~]# chown snow. -R /mnt/files
5) Enable quota - permanent
[root@localhost ~]# blkid | grep vda1
/dev/vda1: UUID="33f6a167-2db8-4e05-ba33-39544d5b419a" TYPE="ext4"
[root@localhost ~]# vim /etc/fstab
......
......
UUID=33f6a167-2db8-4e05-ba33-39544d5b419a /mnt/files ext4 defaults,usrquota,grpquota 1 2
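The fstab options only take effect the next time the filesystem is mounted. A minimal check, assuming /dev/vda1 is already mounted at /mnt/files, is to remount and re-inspect the options:
# Remount so the usrquota/grpquota options from fstab are applied
[root@localhost ~]# mount -o remount /mnt/files
[root@localhost ~]# mount | grep /mnt/files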
|
1.1.2 Implementing Disk Quotas on ext4 - User Quotas
1) Create the quota database and turn on user quotas
[root@localhost ~]# quotacheck -um /mnt/files
[root@localhost ~]# quotaon -uv /mnt/files
/dev/vda1 [/mnt/files]: user quotas turned on
2) Confirm the quota status
[root@localhost ~]# quotaon -ap
group quota on /mnt/files (/dev/vda1) is off
user quota on /mnt/files (/dev/vda1) is on
3) Set a quota for user snow
[root@localhost ~]# edquota -u snow
Set the space available to the snow account to 30 MB (the hard block limit below); the default unit is KB
Disk quotas for user snow (uid 1000):
Filesystem blocks soft hard inodes soft hard
/dev/vda1 0 0 30000 0 0 0
4) Confirm the quota set for snow
[root@localhost ~]# repquota -au
*** Report for user quotas on device /dev/vda1
Block grace time: 7days; Inode grace time: 7days
Block limits File limits
User used soft hard grace used soft hard grace
----------------------------------------------------------------------
snow -- 20 0 30000 2 0 0
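A quick way to confirm the limit actually bites is to write past it as user snow. A minimal sketch (the file name and dd sizes are only illustrative):
[root@localhost ~]# su - snow
# Writing 25 MB stays under the 30000 KB hard limit and should succeed
[snow@localhost ~]$ dd if=/dev/zero of=/mnt/files/test.img bs=1M count=25
# Writing 40 MB should be cut off with a "Disk quota exceeded" error
[snow@localhost ~]$ dd if=/dev/zero of=/mnt/files/test.img bs=1M count=40
[snow@localhost ~]$ rm -f /mnt/files/test.img; exit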
|
1.1.3 Implementing Disk Quotas on ext4 - Group Quotas
1) Create the quota database and turn on group quotas
[root@localhost ~]# quotacheck -gm /mnt/files
[root@localhost ~]# quotaon -gv /mnt/files
/dev/vda1 [/mnt/files]: group quotas turned on
[root@localhost ~]# quotaon -ap
group quota on /mnt/files (/dev/vda1) is on
user quota on /mnt/files (/dev/vda1) is on
2) Configure the group quota
[root@localhost ~]# edquota -g snow
Disk quotas for group snow (gid 1000):
Filesystem blocks soft hard inodes soft hard
/dev/vda1 20 0 500000 2 0 0
[root@localhost ~]# repquota -ag
*** Report for group quotas on device /dev/vda1
Block grace time: 7days; Inode grace time: 7days
Block limits File limits
Group used soft hard grace used soft hard grace
----------------------------------------------------------------------
snow -- 20 0 500000 2 0 0
|
1.1.4 Configure Email Alerts
1) Specify the directory to monitor
[root@localhost ~]# yum install quota-warnquota -y
[root@localhost ~]# vim /etc/quotatab
/dev/vda1: /mnt/files
[root@localhost ~]# sed -i -e "s/example\.com/1000cc\.net/g" /etc/warnquota.conf
[root@localhost ~]# warnquota -s
You have new mail in /var/spool/mail/root
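warnquota only reports violations at the moment it runs, so it is usually scheduled. A minimal sketch of a daily cron entry (the 07:00 schedule is just an example):
[root@localhost ~]# crontab -e
# Mail users who are over quota every day at 07:00
0 7 * * * /usr/sbin/warnquota -s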
|
1.2 Implementing Disk Quotas on xfs
1.2.1 Implementing Disk Quotas on xfs - Preparation
1) This lab requires one disk as a test disk; the system disk can also be used (not recommended for the lab)
2) The target device for this lab is vda1, with an xfs filesystem
3) Install quota
[root@localhost ~]# yum install quota -y
4) Enable quota - temporary (effective for the current mount only)
[root@localhost ~]# mkdir /mnt/files
# Enable user and group quotas
[root@localhost ~]# mount -o usrquota,grpquota /dev/vda1 /mnt/files
[root@localhost ~]# mount | grep /mnt/files
/dev/vda1 on /mnt/files type xfs (rw,relatime,attr2,inode64,usrquota,grpquota)
[root@localhost ~]# chown snow. -R /mnt/files/
5) Enable quota - permanent
[root@localhost ~]# blkid | grep vda1
/dev/vda1: UUID="56a895f8-a004-4ace-9bff-327a7b71bf4e" TYPE="xfs"
[root@localhost ~]# vim /etc/fstab
......
......
UUID=56a895f8-a004-4ace-9bff-327a7b71bf4e /mnt/files xfs defaults,usrquota,grpquota 1 2
|
1.2.2 Implementing Disk Quotas on xfs - User Quotas
1) Create the quota configuration and enable user quotas (the mount must include the usrquota and grpquota options)
[root@localhost ~]# xfs_quota -x /mnt/files
# Confirm the current state
xfs_quota> state
User quota state on /mnt/files (/dev/vda1)
Accounting: ON
Enforcement: ON
Inode: #67 (1 blocks, 1 extents)
Group quota state on /mnt/files (/dev/vda1)
Accounting: ON
Enforcement: ON
Inode: #68 (1 blocks, 1 extents)
Project quota state on /mnt/files (/dev/vda1)
Accounting: OFF
Enforcement: OFF
Inode: #68 (1 blocks, 1 extents)
Blocks grace time: [7 days]
Inodes grace time: [7 days]
Realtime Blocks grace time: [7 days]
# Show the usage report
xfs_quota> report -h
User quota on /mnt/files (/dev/vda1)
Blocks
User ID Used Soft Hard Warn/Grace
---------- ---------------------------------
root 0 0 0 00 [------]
Group quota on /mnt/files (/dev/vda1)
Blocks
Group ID Used Soft Hard Warn/Grace
---------- ---------------------------------
root 0 0 0 00 [------]
# Set a 20 MB soft limit and a 30 MB hard limit for the snow account
xfs_quota> limit bsoft=20m bhard=30m snow
# Confirm the settings
xfs_quota> report -h -u
User quota on /mnt/files (/dev/vda1)
Blocks
User ID Used Soft Hard Warn/Grace
---------- ---------------------------------
root 0 0 0 00 [------]
snow 0 20M 30M 00 [------]
xfs_quota> quit
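xfs_quota can also cap the number of files a user may create, not just the space. A minimal sketch of an inode limit on the same mount (the 1000/1200 values are only illustrative):
# Limit user snow to 1000 inodes (soft) / 1200 inodes (hard)
[root@localhost ~]# xfs_quota -x -c 'limit isoft=1000 ihard=1200 snow' /mnt/files
# report -i shows inode usage instead of blocks
[root@localhost ~]# xfs_quota -x -c 'report -i -h' /mnt/files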
|
1.2.3 Implementing Disk Quotas on xfs - Group Quotas
[root@localhost ~]# xfs_quota -x -c 'limit -g bsoft=1g bhard=2g snow' /mnt/files
[root@localhost ~]# xfs_quota -x -c 'report -h -g' /mnt/files
Group quota on /mnt/files (/dev/vda1)
Blocks
Group ID Used Soft Hard Warn/Grace
---------- ---------------------------------
root 0 0 0 00 [------]
snow 0 1G 2G 00 [------]
|
1.2.4 Configure Email Alerts
1) Specify the directory to monitor
[root@localhost ~]# yum install quota-warnquota -y
[root@localhost ~]# vim /etc/quotatab
/dev/vda1: /mnt/files
[root@localhost ~]# sed -i -e "s/example\.com/1000cc\.net/g" /etc/warnquota.conf
[root@localhost ~]# warnquota -s
You have new mail in /var/spool/mail/root
|
2. RAID Implementation
2.1 RAID1
1) This lab uses two disks: vda and vdb
2) Create new partitions and flag them as RAID
# mklabel can also be msdos
[root@localhost ~]# parted --script /dev/vda "mklabel gpt"
[root@localhost ~]# parted --script /dev/vdb "mklabel gpt"
# Partitions can also be sized in GB: parted --script /dev/sdb unit GB "mkpart primary 0 2"
[root@localhost ~]# parted --script /dev/vda "mkpart primary 0% 100%"
[root@localhost ~]# parted --script /dev/vdb "mkpart primary 0% 100%"
[root@localhost ~]# parted --script /dev/vda "set 1 raid on"
[root@localhost ~]# parted --script /dev/vdb "set 1 raid on"
3) Configure RAID1
[root@localhost ~]# yum install mdadm -y
[root@localhost ~]# mdadm --create /dev/md0 --level=raid1 --raid-devices=2 /dev/vda1 /dev/vdb1
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# cat /proc/mdstat
# Sync in progress
Personalities : [raid1]
md0 : active raid1 vdb1[1] vda1[0]
20952064 blocks super 1.2 [2/2] [UU]
[====>................] resync = 20.2% (4244352/20952064) finish=3.3min speed=81991K/sec
unused devices: <none>
# Sync finished
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 vdb1[1] vda1[0]
20952064 blocks super 1.2 [2/2] [UU]
unused devices: <none>
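At this point the array only exists as superblocks on its member partitions. To give it a stable name across reboots and actually put data on it, the configuration can be recorded and a filesystem created. A minimal sketch (the choice of xfs and the mount point are only examples):
# Record the array so it is assembled as /dev/md0 at boot
[root@localhost ~]# mdadm --detail --scan >> /etc/mdadm.conf
# Create a filesystem on the array and mount it
[root@localhost ~]# mkfs.xfs /dev/md0
[root@localhost ~]# mount /dev/md0 /mnt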
4) Add the RAID device to the periodic check (cron)
[root@localhost ~]# vim +57 /etc/sysconfig/raid-check
......
......
# Line 57: add the devices that should be checked
CHECK_DEVS="md0"
......
......
5) Fault handling
# vdb1 has been lost
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid1]
md0 : active (auto-read-only) raid1 vda1[0]
83818496 blocks super 1.2 [2/1] [U_]
unused devices: <none>
# Rebuild RAID1
[root@localhost ~]# mdadm --manage /dev/md0 --add /dev/vdb1
mdadm: added /dev/vdb1
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid1]
md0 : active raid1 vda1[1] vdb1[0]
83818496 blocks super 1.2 [2/2] [UU]
[======>..............] resync = 31.0% (26044416/83818496) finish=4.7min speed=201190K/sec
unused devices: <none>
6) Remove RAID1
[root@localhost ~]# mdadm -S /dev/md0
mdadm: stopped /dev/md0
[root@localhost ~]# mdadm --misc --zero-superblock /dev/vda1
[root@localhost ~]# mdadm --misc --zero-superblock /dev/vdb1
|
2.2 RAID5
1) This lab uses three disks: vda, vdb, and vdc
2) Create new partitions and flag them as RAID
[root@localhost ~]# parted --script /dev/vda "mklabel gpt"
[root@localhost ~]# parted --script /dev/vdb "mklabel gpt"
[root@localhost ~]# parted --script /dev/vdc "mklabel gpt"
[root@localhost ~]# parted --script /dev/vda "mkpart primary 0% 100%"
[root@localhost ~]# parted --script /dev/vdb "mkpart primary 0% 100%"
[root@localhost ~]# parted --script /dev/vdc "mkpart primary 0% 100%"
[root@localhost ~]# parted --script /dev/vda "set 1 raid on"
[root@localhost ~]# parted --script /dev/vdb "set 1 raid on"
[root@localhost ~]# parted --script /dev/vdc "set 1 raid on"
3) Configure RAID5
[root@localhost ~]# yum install mdadm -y
[root@localhost ~]# mdadm --create /dev/md0 --level=raid5 --raid-devices=3 /dev/vda1 /dev/vdb1 /dev/vdc1
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
[root@localhost ~]# cat /proc/mdstat
# Sync in progress
Personalities : [raid1] [raid6] [raid5] [raid4]
md0 : active raid5 vdc1[3] vdb1[1] vda1[0]
41904128 blocks super 1.2 level 5, 512k chunk, algorithm 2 [3/2] [UU_]
[=>...................] recovery = 5.5% (1156736/20952064) finish=4.2min speed=77115K/sec
unused devices: <none>
# Sync finished
[root@localhost ~]# cat /proc/mdstat
Personalities : [raid1] [raid6] [raid5] [raid4]
md0 : active raid5 vdc1[3] vdb1[1] vda1[0]
41904128 blocks super 1.2 level 5, 512k chunk, algorithm 2 [3/3] [UUU]
unused devices: <none>
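A failed RAID5 member is handled the same way as in section 2.1: fail and remove the bad partition, then add a replacement and let the array rebuild. A minimal sketch, assuming vdc1 has failed and a new partition vdd1 (hypothetical) replaces it:
# Mark the member as failed and pull it out of the array
[root@localhost ~]# mdadm --manage /dev/md0 --fail /dev/vdc1
[root@localhost ~]# mdadm --manage /dev/md0 --remove /dev/vdc1
# Add the replacement; /proc/mdstat will show the rebuild progress
[root@localhost ~]# mdadm --manage /dev/md0 --add /dev/vdd1
[root@localhost ~]# cat /proc/mdstat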
|
3. LUKS Implementation
3.1 Setting Up LUKS
1) Create the encrypted device
# Note: the confirmation must be typed in uppercase
[root@localhost ~]# cryptsetup luksFormat /dev/vda1
WARNING!
========
This will overwrite data on /dev/vda1 irrevocably.
Are you sure? (Type uppercase yes): YES
Enter passphrase for /dev/vda1: # set the encryption passphrase
Verify passphrase:
2) Set a mapping name for the encrypted partition
[root@localhost ~]# cryptsetup luksOpen /dev/vda1 snowsecret
Enter passphrase for /dev/vda1: # enter the encryption passphrase
3) Confirm the status
[root@localhost ~]# ll /dev/mapper/snowsecret
lrwxrwxrwx 1 root root 7 Feb 14 01:01 /dev/mapper/snowsecret -> ../dm-2
4) Format and mount
[root@localhost ~]# mkfs.ext4 /dev/mapper/snowsecret
[root@localhost ~]# mount /dev/mapper/snowsecret /mnt
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowsecret ext4 20G 45M 19G 1% /mnt
5) Check which partition the encrypted mapping corresponds to
[root@localhost ~]# cryptsetup status snowsecret
/dev/mapper/snowsecret is active and is in use.
type: LUKS1
cipher: aes-xts-plain64
keysize: 256 bits
key location: dm-crypt
device: /dev/vda1
sector size: 512
offset: 4096 sectors
size: 41936896 sectors
mode: read/write
|
3.2 Opening the LUKS Partition Automatically at Boot
# The key file's contents are used as the key, so write the key material into it (an empty file would add an empty key)
[root@localhost ~]# touch /root/cryptpasswd
[root@localhost ~]# cryptsetup luksAddKey /dev/vda1 /root/cryptpasswd
Enter any passphrase: # enter the existing encryption passphrase
[root@localhost ~]# vim /etc/crypttab
Format: <mapping name> <partition> <key file>
snowsecret /dev/vda1 /root/cryptpasswd
[root@localhost ~]# vim /etc/fstab
......
......
/dev/mapper/snowsecret /mnt ext4 defaults 1 2
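Referencing the partition by its LUKS UUID in /etc/crypttab is more robust than /dev/vda1 if device names change between boots. A minimal sketch (the UUID shown is a placeholder for whatever luksUUID prints):
# Look up the LUKS UUID of the partition
[root@localhost ~]# cryptsetup luksUUID /dev/vda1
# Then use it in /etc/crypttab instead of the device node
snowsecret UUID=<uuid-printed-above> /root/cryptpasswd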
|
3.3 Closing LUKS
[root@localhost ~]# umount /mnt
[root@localhost ~]# cryptsetup luksClose /dev/mapper/snowsecret
|
4. LVM Implementation
4.1 Creating LVM
1) This lab uses three disks: vda, vdb, and vdc
2) Create the PVs
[root@localhost ~]# pvcreate /dev/vd[a-b]1
Physical volume "/dev/vda1" successfully created.
Physical volume "/dev/vdb1" successfully created.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda1 lvm2 --- <20.00g <20.00g
/dev/vdb1 lvm2 --- <20.00g <20.00g
3) Create the VG
[root@localhost ~]# vgcreate snowvg /dev/vd[a-b]1
Volume group "snowvg" successfully created
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
snowvg 2 0 0 wz--n- 39.99g 39.99g
4) Create the LV
[root@localhost ~]# lvcreate -L 30G -n lisalv snowvg
Logical volume "lisalv" created.
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lisalv snowvg -wi-a----- 30.00g
5) Use the LV
[root@localhost ~]# mkfs.ext4 /dev/snowvg/lisalv
[root@localhost ~]# mount /dev/snowvg/lisalv /mnt/
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-lisalv ext4 30G 45M 28G 1% /mnt
|
4.2 Extending/Shrinking
4.2.1 Extending/Shrinking an LV - ext4
1) Extend the LV (from the 30G above to 32G)
[root@localhost ~]# lvextend -L +2G -f -r /dev/snowvg/lisalv
Size of logical volume snowvg/lisalv changed from 30.00 GiB (7680 extents) to 32.00 GiB (8192 extents).
Logical volume snowvg/lisalv successfully resized.
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/mapper/snowvg-lisalv is mounted on /mnt; on-line resizing required
old_desc_blocks = 4, new_desc_blocks = 4
The filesystem on /dev/mapper/snowvg-lisalv is now 8388608 blocks long.
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-lisalv ext4 32G 45M 22G 1% /mnt
2) Shrink the LV (from the 32G above down to 23G)
[root@localhost ~]# lvreduce -L -9G -f -r /dev/snowvg/lisalv
Do you want to unmount "/mnt" ? [Y|n] y
fsck from util-linux 2.23.2
/dev/mapper/snowvg-lisalv: 11/2097152 files (0.0% non-contiguous), 176706/8388608 blocks
resize2fs 1.42.9 (28-Dec-2013)
Resizing the filesystem on /dev/mapper/snowvg-lisalv to 6029312 (4k) blocks.
The filesystem on /dev/mapper/snowvg-lisalv is now 6029312 blocks long.
Size of logical volume snowvg/lisalv changed from 32.00 GiB (8192 extents) to 23.00 GiB (5888 extents).
Logical volume snowvg/lisalv successfully resized.
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-lisalv ext4 23G 45M 22G 1% /mnt
|
4.2.2 Extending an LV - XFS
1) Shrinking an LV -- not supported by XFS
# An XFS filesystem cannot be shrunk; to make the LV smaller it has to be removed and recreated, so take a backup first (a combined sketch follows after step (3))
(1) Install the xfsdump tool
(2) Make the backup (syntax: xfsdump -f <dump file> <directory to back up>)
[root@localhost ~]# xfsdump -f /mnt.xfsdump /mnt
(3) After the new, smaller LV has been created, restore the data (syntax: xfsrestore -f <dump file> <destination directory>)
[root@localhost ~]# xfsrestore -f /mnt.xfsdump /mnt
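Putting steps (1)-(3) together, a minimal sketch of "shrinking" the XFS LV by recreating it (the 20G target size is only illustrative):
[root@localhost ~]# yum install xfsdump -y
# 1. Back up the mounted filesystem, then unmount and remove the old LV
[root@localhost ~]# xfsdump -f /mnt.xfsdump /mnt
[root@localhost ~]# umount /mnt
[root@localhost ~]# lvremove /dev/snowvg/lisalv
# 2. Recreate the LV at the smaller size and put a fresh XFS on it
[root@localhost ~]# lvcreate -L 20G -n lisalv snowvg
[root@localhost ~]# mkfs.xfs /dev/snowvg/lisalv
# 3. Mount it again and restore the dump
[root@localhost ~]# mount /dev/snowvg/lisalv /mnt
[root@localhost ~]# xfsrestore -f /mnt.xfsdump /mnt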
2) Extend the LV
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-lisalv xfs 23G 33M 23G 1% /mnt
# Extend the LV
[root@localhost ~]# lvextend -L +5G /dev/snowvg/lisalv
Size of logical volume snowvg/lisalv changed from 23.00 GiB (5888 extents) to 28.00 GiB (7168 extents).
Logical volume snowvg/lisalv successfully resized.
# Grow the XFS filesystem
[root@localhost ~]# xfs_growfs /mnt
meta-data=/dev/mapper/snowvg-lisalv isize=512 agcount=4, agsize=1507328 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0 spinodes=0
data = bsize=4096 blocks=6029312, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal bsize=4096 blocks=2944, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 6029312 to 7340032
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-lisalv xfs 28G 33M 28G 1% /mnt
|
4.2.3 Extending/Shrinking a VG and PV
1) Extend the VG
(1) Add a PV
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda1 snowvg lvm2 a-- <20.00g 0
/dev/vdb1 snowvg lvm2 a-- <20.00g 16.99g
[root@localhost ~]# pvcreate /dev/vdc1
Physical volume "/dev/vdc1" successfully created.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda1 snowvg lvm2 a-- <20.00g 0
/dev/vdb1 snowvg lvm2 a-- <20.00g 16.99g
/dev/vdc1 lvm2 --- <20.00g <20.00g
(2) Extend the VG
[root@localhost ~]# vgextend snowvg /dev/vdc1
Volume group "snowvg" successfully extended
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
snowvg 3 1 0 wz--n- <59.99g <36.99g
2) Shrink the VG
(1) Make sure the data can be moved off one of the disks (see the pvmove sketch below)
(2) Shrink the VG
[root@localhost ~]# vgreduce snowvg /dev/vdc1
Removed "/dev/vdc1" from volume group "snowvg"
[root@localhost ~]# vgs
VG #PV #LV #SN Attr VSize VFree
snowvg 2 1 0 wz--n- 39.99g 16.99g
(3) Remove the PV label
[root@localhost ~]# pvremove /dev/vdc1
Labels on physical volume "/dev/vdc1" successfully wiped.
[root@localhost ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vda1 snowvg lvm2 a-- <20.00g 0
/dev/vdb1 snowvg lvm2 a-- <20.00g 16.99g
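For step (1), any extents still allocated on the PV being removed can first be migrated onto the remaining PVs in the VG. A minimal sketch (the VG must have enough free space elsewhere):
# Move all allocated extents off /dev/vdc1 onto the other PVs
[root@localhost ~]# pvmove /dev/vdc1
# Confirm the PV no longer holds any allocated extents
[root@localhost ~]# pvs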
|
4.3 LVM Snapshots
1) Create a snapshot named slisalv
[root@localhost ~]# lvcreate -L 100M -s -n slisalv /dev/snowvg/lisalv
Logical volume "slisalv" created.
2) Use the snapshot (it can be mounted directly, no other steps are needed)
[root@localhost ~]# mount /dev/snowvg/slisalv /mnt
[root@localhost ~]# df -Th | grep /mnt
/dev/mapper/snowvg-slisalv xfs 28G 33M 28G 1% /mnt
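Note that if the origin LV carries XFS and is still mounted, the snapshot usually has to be mounted with -o nouuid, because both copies share the same filesystem UUID. To roll the origin back to the snapshot's contents, the snapshot can be merged into it; a minimal sketch (both should be unmounted; the merge is deferred until the next activation if the origin is busy):
[root@localhost ~]# umount /mnt
# Merge the snapshot back into its origin; the snapshot LV disappears once the merge completes
[root@localhost ~]# lvconvert --merge /dev/snowvg/slisalv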
|
5. Creating a Striped LV
1) Create the striped LV
[root@localhost ~]# pvcreate /dev/vd[a-b]1
Physical volume "/dev/vda1" successfully created.
Physical volume "/dev/vdb1" successfully created.
[root@localhost ~]# vgcreate snow_striped /dev/vd[a-b]1
Volume group "snow_striped" successfully created
# Option notes:
# -i : number of stripes (how many disks to stripe across)
# -I : stripe size in kilobytes
[root@localhost ~]# lvcreate -l 100%FREE -i 2 -I 64 -n lisa_striped snow_striped
Wiping xfs signature on /dev/snow_striped/lisa_striped.
Logical volume "lisa_striped" created.
2) Confirm the striped LV
[root@localhost ~]# lvdisplay
--- Logical volume ---
LV Path /dev/snow_striped/lisa_striped
LV Name lisa_striped
VG Name snow_striped
LV UUID VGtcKS-4UYd-yvfw-eiOB-OMSk-5hGs-WkzHjH
LV Write Access read/write
LV Creation host, time localhost.1000cc.net, 2020-02-14 02:20:23 +0800
LV Status available
# open 0
LV Size 39.99 GiB
Current LE 10238
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
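lvdisplay above does not show the stripe layout directly; the per-segment view does. A minimal check:
# The #Str column should show 2 stripes for lisa_striped
[root@localhost ~]# lvs --segments snow_striped/lisa_striped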
|
6. Mirrored LV Implementation and Management
6.1 Creating a Mirrored LV
[root@localhost ~]# vgcreate snowvg_mirror /dev/vd[a-b]1
Volume group "snowvg_mirror" successfully created
[root@localhost ~]# lvcreate -L 4G -m1 -n lisalv_mirror snowvg_mirror
Logical volume "lisalv_mirror" created.
# Cpy%Sync shows the mirror is still syncing
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 31.48
# Cpy%Sync shows the sync has completed
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 100.00
|
6.2 Adding Mirroring to an Existing LV
[root@localhost ~]# pvcreate /dev/vdc1
[root@localhost ~]# vgextend snowvg /dev/vdc1
[root@localhost ~]# lvconvert -m1 /dev/snowvg/lisalv /dev/vdc1
|
6.3 Repairing a Failed Mirrored LV
1) Confirm the current state of the mirrored LV
[root@localhost ~]# lvs -a -o +devices
LV VG Attr LSize ......Cpy%Sync Convert Devices
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 100.00 lisalv_....
[lisalv_mirror_rimage_0] snowvg_mirror iwi-aor--- 4.00g /dev/vda1(1)
[lisalv_mirror_rimage_1] snowvg_mirror iwi-aor--- 4.00g /dev/vdb1(1)
[lisalv_mirror_rmeta_0] snowvg_mirror ewi-aor--- 4.00m /dev/vda1(0)
[lisalv_mirror_rmeta_1] snowvg_mirror ewi-aor--- 4.00m /dev/vdb1(0)
2) Simulate destroying one of the PVs
[root@localhost ~]# dd if=/dev/zero of=/dev/vdb1 count=10
3) Confirm the damage
[root@localhost ~]# lvs -a -o +devices
LV VG Attr LSize ......Cpy%Sync Convert Devices
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 100.00 lisalv_....
[lisalv_mirror_rimage_0] snowvg_mirror iwi-aor--- 4.00g /dev/vda1(1)
[lisalv_mirror_rimage_1] snowvg_mirror iwi-aor--- 4.00g [unknown](1)
[lisalv_mirror_rmeta_0] snowvg_mirror ewi-aor--- 4.00m /dev/vda1(0)
[lisalv_mirror_rmeta_1] snowvg_mirror ewi-aor--- 4.00m [unknown](0)
4) Remove the failed device
[root@localhost ~]# vgreduce --removemissing --force snowvg_mirror
5) Remove the LV's mirroring (convert back to linear)
[root@localhost ~]# lvconvert -m0 /dev/snowvg_mirror/lisalv_mirror
Are you sure you want to convert raid1 LV snowvg_mirror/lisalv_mirror to type linear losing all resilience? [y/n]: y
Logical volume snowvg_mirror/lisalv_mirror successfully converted.
6) Add a new disk and convert back to a mirror
[root@localhost ~]# pvcreate /dev/vdc1
Physical volume "/dev/vdc1" successfully created.
[root@localhost ~]# vgextend snowvg_mirror /dev/vdc1
Volume group "snowvg_mirror" successfully extended
[root@localhost ~]# lvconvert -m1 /dev/snowvg_mirror/lisalv_mirror /dev/vdc1
Are you sure you want to convert linear LV snowvg_mirror/lisalv_mirror to raid1 with 2 images enhancing resilience? [y/n]: y
Logical volume snowvg_mirror/lisalv_mirror successfully converted.
7) Data sync begins
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 48.28
8) Data sync completes
[root@localhost ~]# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lisalv_mirror snowvg_mirror rwi-a-r--- 4.00g 100.00
|
If this document helped you, feel free to leave a tip. ^-^