0%

Linux: LVM thin volume and snapshot

LVM: logical volume manager

Advantage: gives more flexibility in allocating storage space

LVM thin volume

Create thin pool first

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
[root@client ~]# lvcreate -L 1G -T thin_vg/thinpool
Using default stripesize 64.00 KiB.
Logical volume "thinpool" created.
[root@client ~]# lvdisplay thin_vg/thinpool
--- Logical volume ---
LV Name thinpool
VG Name thin_vg
LV UUID ncc3Kh-eZgD-WLQn-F4cq-pPJJ-rYjD-XsTx34
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-12 13:02:42 -0700
LV Pool metadata thinpool_tmeta
LV Pool data thinpool_tdata
LV Status available
# open 0
LV Size 1.00 GiB
Allocated pool data 0.00%
Allocated metadata 0.98%
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:6

Create a thin volume, annathin1, on the thin pool

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
[root@client ~]# lvcreate -V 10G -T thin_vg/thinpool -n annathin1
Using default stripesize 64.00 KiB.
WARNING: Sum of all thin volume sizes (10.00 GiB) exceeds the size of thin pool thin_vg/thinpool and the size of whole volume group (2.00 GiB)!
For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.
Logical volume "annathin1" created.
[root@client ~]# lvdisplay thin_vg
--- Logical volume ---
LV Name thinpool
VG Name thin_vg
LV UUID ncc3Kh-eZgD-WLQn-F4cq-pPJJ-rYjD-XsTx34
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-12 13:02:42 -0700
LV Pool metadata thinpool_tmeta
LV Pool data thinpool_tdata
LV Status available
# open 2
LV Size 1.00 GiB
Allocated pool data 0.00%
Allocated metadata 1.07%
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:6

--- Logical volume ---
LV Path /dev/thin_vg/annathin1
LV Name annathin1
VG Name thin_vg
LV UUID q47tG0-P2xf-CMsw-a2tE-SgWl-gjni-VJD1jA
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-12 13:09:12 -0700
LV Pool name thinpool
LV Status available
# open 0
LV Size 10.00 GiB
Mapped size 0.00%
Current LE 2560
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:8
[root@client /]# mkfs.xfs /dev/thin_vg/annathin1
meta-data=/dev/thin_vg/annathin1 isize=512 agcount=16, agsize=163824 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621184, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@client /]# mkdir /mnt/annathin1
[root@client /]# mount /dev/thin_vg/annathin1 /mnt/annathin1/
[root@client /]# df -Th /mnt/annathin1
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/thin_vg-annathin1 xfs 10G 33M 10G 1% /mnt/annathin1

copy some files to annathin1

1
2
3
4
5
6
7
8
9
[root@client ]# cp -a /etc/vsftpd/ /mnt/annathin1/
[root@client ]# cp -a /usr/share/doc/man-db-2.6.3/ /mnt/annathin1/
[root@client annathin1]# ll
total 0
drwxr-xr-x. 2 root root 113 Jun 2 2017 man-db-2.6.3
drwxr-xr-x. 2 root root 128 Apr 4 12:46 vsftpd
[root@client ~]# df -Th /mnt/annathin1/
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/thin_vg-annathin1 xfs 10G 34M 10G 1% /mnt/annathin1

The thin pool in thin_vg actually has only 1 GB of real space, yet the volume reports 10 GB after being created on the thin pool. That is the magic of LVM thin provisioning.
Note: users must not write more than the actual 1 GB of pool space, even though the volume appears to be 10 GB — exceeding it will destroy the filesystem!

LVM snapshot

create a snapshot for annathin1

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
[root@client ~]# lvcreate -s -l 99 -n  thinpool_snapshot  /dev/thin_vg/annathin1
Using default stripesize 64.00 KiB.
WARNING: Sum of all thin volume sizes (10.00 GiB) exceeds the size of thin pools and the size of whole volume group (2.00 GiB)!
For thin pool auto extension activation/thin_pool_autoextend_threshold should be below 100.
Logical volume "thinpool_snapshot" created.
[root@client ~]# lvdisplay /dev/thin_vg/thinpool_snapshot
--- Logical volume ---
LV Path /dev/thin_vg/thinpool_snapshot
LV Name thinpool_snapshot
VG Name thin_vg
LV UUID 4fYojx-SvHi-5IVX-aoIk-RxpD-8Nlb-1hHK3g
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-14 17:01:32 -0700
LV snapshot status active destination for annathin1
LV Status available
# open 0
LV Size 10.00 GiB
Current LE 2560
COW-table size 396.00 MiB
COW-table LE 99
Allocated to snapshot 0.02%
Snapshot chunk size 4.00 KiB
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:11


[root@client ~]# lvdisplay /dev/thin_vg/annathin1
--- Logical volume ---
LV Path /dev/thin_vg/annathin1
LV Name annathin1
VG Name thin_vg
LV UUID q47tG0-P2xf-CMsw-a2tE-SgWl-gjni-VJD1jA
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-12 13:09:12 -0700
LV snapshot status source of
thinpool_snapshot [active]
LV Pool name thinpool
LV Status available
# open 1
LV Size 10.00 GiB
Mapped size 0.12%
Current LE 2560
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:8

Mount

1
2
3
4
5
6
7
8
9
[root@client /]# mkdir  /mnt/snapshot
[root@client snapshot]# blkid
/dev/mapper/thin_vg-annathin1: UUID="93b48865-b28d-4e2e-b76e-893e804d944c" TYPE="xfs"
/dev/mapper/thin_vg-thinpool_snapshot: UUID="93b48865-b28d-4e2e-b76e-893e804d944c" TYPE="xfs"
[root@client /]# mount -o nouuid /dev/thin_vg/thinpool_snapshot /mnt/snapshot
[root@client /]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/thin_vg-annathin1 xfs 10G 34M 10G 1% /mnt/annathin1
/dev/mapper/thin_vg-thinpool_snapshot xfs 10G 34M 10G 1% /mnt/snapshot

Because it is a snapshot, /dev/thin_vg/annathin1 and /dev/thin_vg/thinpool_snapshot share the same filesystem UUID. Since XFS refuses to mount two filesystems with the same UUID, the snapshot must be mounted with the “nouuid” mount option.

make some changes on the /mnt/annathin1 dir

1
2
3
4
5
6
[root@client man-db-2.6.3]# rm ChangeLog
rm: remove regular file ‘ChangeLog’?
[root@client man-db-2.6.3]# df -Th /mnt/annathin1/ /mnt/snapshot/
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/thin_vg-annathin1 xfs 10G 33M 10G 1% /mnt/annathin1
/dev/mapper/thin_vg-thinpool_snapshot xfs 10G 34M 10G 1% /mnt/snapshot

Check the status of the snapshot after making the change in the original annathin1 directory

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
[root@client man-db-2.6.3]# lvdisplay /dev/thin_vg/thinpool_snapshot 
--- Logical volume ---
LV Path /dev/thin_vg/thinpool_snapshot
LV Name thinpool_snapshot
VG Name thin_vg
LV UUID eT1j03-4v5E-7fUi-BSV0-6dun-RCL1-fy9atx
LV Write Access read/write
LV Creation host, time client.centos7.study, 2019-07-14 17:18:16 -0700
LV snapshot status active destination for annathin1
LV Status available
# open 1
LV Size 10.00 GiB
Current LE 2560
COW-table size 396.00 MiB
COW-table LE 99
Allocated to snapshot 0.52%
Snapshot chunk size 4.00 KiB
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:11

It shows that 0.52% of the snapshot's COW (copy-on-write) space has now been used, reflecting the change made to the original volume.

Next, we will restore annathin1 from the snapshot. First, we back up the snapshot's contents with xfsdump.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
[root@client ~]# xfsdump -l 0 -L lvm1 -M lvm1 -f /tmp/backup.lvm    /mnt/snapshot
xfsdump: using file dump (drive_simple) strategy
xfsdump: version 3.1.4 (dump format 3.0) - type ^C for status and control
xfsdump: level 0 dump of client.centos7.study:/mnt/snapshot
xfsdump: dump date: Sun Jul 14 17:31:38 2019
xfsdump: session id: 5ea4e354-bf53-4069-8b63-ec4c2259c444
xfsdump: session label: "lvm1"
xfsdump: ino map phase 1: constructing initial dump list
xfsdump: ino map phase 2: skipping (no pruning necessary)
xfsdump: ino map phase 3: skipping (only one dump stream)
xfsdump: ino map construction complete
xfsdump: estimated dump size: 786816 bytes
xfsdump: creating dump session media file 0 (media 0, file 0)
xfsdump: dumping ino map
xfsdump: dumping directories
xfsdump: dumping non-directory files
xfsdump: ending media file
xfsdump: media file size 775272 bytes
xfsdump: dump size (non-dir files) : 745816 bytes
xfsdump: dump complete: 1 seconds elapsed
xfsdump: Dump Summary:
xfsdump: stream 0 /tmp/backup.lvm OK (success)
xfsdump: Dump Status: SUCCESS
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
[root@client ~]# umount /mnt/snapshot
[root@client ~]# lvremove /dev/thin_vg/thinpool_snapshot
Do you really want to remove active logical volume thin_vg/thinpool_snapshot? [y/n]: y
Logical volume "thinpool_snapshot" successfully removed
[root@client ~]# umount /mnt/annathin1/
[root@client ~]# mkfs.xfs -f /dev/thin_vg/annathin1
meta-data=/dev/thin_vg/annathin1 isize=512 agcount=16, agsize=163824 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621184, imaxpct=25
= sunit=16 swidth=16 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=16 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@client ~]# mount /dev/thin_vg/annathin1 /mnt/annathin1/

[root@client ~]# xfsrestore -f /tmp/backup.lvm -L lvm1 /mnt/annathin1/
xfsrestore: using file dump (drive_simple) strategy
xfsrestore: version 3.1.4 (dump format 3.0) - type ^C for status and control
xfsrestore: searching media for dump
xfsrestore: examining media file 0
xfsrestore: found dump matching specified label:
xfsrestore: hostname: client.centos7.study
xfsrestore: mount point: /mnt/snapshot
xfsrestore: volume: /dev/mapper/thin_vg-thinpool_snapshot
xfsrestore: session time: Sun Jul 14 17:31:38 2019
xfsrestore: level: 0
xfsrestore: session label: "lvm1"
xfsrestore: media label: "lvm1"
xfsrestore: file system id: 93b48865-b28d-4e2e-b76e-893e804d944c
xfsrestore: session id: 5ea4e354-bf53-4069-8b63-ec4c2259c444
xfsrestore: media id: e223f10b-7718-4801-bda7-7e066e59deb1
xfsrestore: using online session inventory
xfsrestore: searching media for directory dump
xfsrestore: reading directories
xfsrestore: 3 directories and 13 entries processed
xfsrestore: directory post-processing
xfsrestore: restoring non-directory files
xfsrestore: restore complete: 0 seconds elapsed
xfsrestore: Restore Summary:
xfsrestore: stream 0 /tmp/backup.lvm OK (success)
xfsrestore: Restore Status: SUCCESS

[root@client ~]# df -Th /mnt/annathin1/
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/thin_vg-annathin1 xfs 10G 34M 10G 1% /mnt/annathin1

It shows the annathin1 filesystem usage is back up from 33M to 34M — the original state before the file was deleted, confirming the restore succeeded.