Linux comes with powerful logical volume management in the form of the Logical Volume Manager (LVM).
Below is an example of how to set up a logical volume striped (RAID 0) across multiple disks using LVM.
First, attach multiple disks to the machine. In this case I have added 20 disks of 4 GB each, seen below as sdd through sdw.
List connected storage devices:
~# lsblk
NAME     MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
fd0          2:0  1    4K  0 disk
loop0        7:0  0 55.5M  1 loop /snap/core18/2409
loop1        7:1  0 55.5M  1 loop /snap/core18/2284
loop2        7:2  0 61.9M  1 loop /snap/core20/1518
loop3        7:3  0 67.2M  1 loop /snap/lxd/21835
loop4        7:4  0 67.8M  1 loop /snap/lxd/22753
loop5        7:5  0 61.9M  1 loop /snap/core20/1361
loop6        7:6  0 43.6M  1 loop /snap/snapd/14978
loop7        7:7  0   47M  1 loop /snap/snapd/16010
sda          8:0  0   30G  0 disk
├─sda1       8:1  0    1M  0 part
└─sda2       8:2  0   30G  0 part /
sdb         8:16  0   64G  0 disk
└─sdb1      8:17  0   64G  0 part /data
sdc         8:32  0  600G  0 disk
└─sdc1      8:33  0  600G  0 part /mnt
sdd         8:48  0    4G  0 disk
sde         8:64  0    4G  0 disk
sdf         8:80  0    4G  0 disk
sdg         8:96  0    4G  0 disk
sdh        8:112  0    4G  0 disk
sdi        8:128  0    4G  0 disk
sdj        8:144  0    4G  0 disk
sdk        8:160  0    4G  0 disk
sdl        8:176  0    4G  0 disk
sdm        8:192  0    4G  0 disk
sdn        8:208  0    4G  0 disk
sdo        8:224  0    4G  0 disk
sdp        8:240  0    4G  0 disk
sdq         65:0  0    4G  0 disk
sdr        65:16  0    4G  0 disk
sds        65:32  0    4G  0 disk
sdt        65:48  0    4G  0 disk
sdu        65:64  0    4G  0 disk
sdv        65:80  0    4G  0 disk
sdw        65:96  0    4G  0 disk
Create a partition table and a single partition on the first device:
~# cfdisk /dev/sdd
Then copy the partition table onto the remaining disks:
~# for disk in e f g h i j k l m n o p q r s t u v w ; do sfdisk -d /dev/sdd | sfdisk --force /dev/sd$disk; done
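If the kernel does not pick up the new partition tables right away, a re-read can be forced with partprobe (usually unnecessary, since sfdisk triggers the re-read itself):

~# partprobe /dev/sd{e..w}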
See the changes in lsblk:
~# lsblk
NAME     MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
fd0          2:0  1    4K  0 disk
loop0        7:0  0 55.5M  1 loop /snap/core18/2409
loop1        7:1  0 55.5M  1 loop /snap/core18/2284
loop2        7:2  0 61.9M  1 loop /snap/core20/1518
loop3        7:3  0 67.2M  1 loop /snap/lxd/21835
loop4        7:4  0 67.8M  1 loop /snap/lxd/22753
loop5        7:5  0 61.9M  1 loop /snap/core20/1361
loop6        7:6  0 43.6M  1 loop /snap/snapd/14978
loop7        7:7  0   47M  1 loop /snap/snapd/16010
sda          8:0  0   30G  0 disk
├─sda1       8:1  0    1M  0 part
└─sda2       8:2  0   30G  0 part /
sdb         8:16  0   64G  0 disk
└─sdb1      8:17  0   64G  0 part /data
sdc         8:32  0  600G  0 disk
└─sdc1      8:33  0  600G  0 part /mnt
sdd         8:48  0    4G  0 disk
└─sdd1      8:49  0    4G  0 part
sde         8:64  0    4G  0 disk
└─sde1      8:65  0    4G  0 part
sdf         8:80  0    4G  0 disk
└─sdf1      8:81  0    4G  0 part
sdg         8:96  0    4G  0 disk
└─sdg1      8:97  0    4G  0 part
sdh        8:112  0    4G  0 disk
└─sdh1     8:113  0    4G  0 part
sdi        8:128  0    4G  0 disk
└─sdi1     8:129  0    4G  0 part
sdj        8:144  0    4G  0 disk
└─sdj1     8:145  0    4G  0 part
sdk        8:160  0    4G  0 disk
└─sdk1     8:161  0    4G  0 part
sdl        8:176  0    4G  0 disk
└─sdl1     8:177  0    4G  0 part
sdm        8:192  0    4G  0 disk
└─sdm1     8:193  0    4G  0 part
sdn        8:208  0    4G  0 disk
└─sdn1     8:209  0    4G  0 part
sdo        8:224  0    4G  0 disk
└─sdo1     8:225  0    4G  0 part
sdp        8:240  0    4G  0 disk
└─sdp1     8:241  0    4G  0 part
sdq         65:0  0    4G  0 disk
└─sdq1      65:1  0    4G  0 part
sdr        65:16  0    4G  0 disk
└─sdr1     65:17  0    4G  0 part
sds        65:32  0    4G  0 disk
└─sds1     65:33  0    4G  0 part
sdt        65:48  0    4G  0 disk
└─sdt1     65:49  0    4G  0 part
sdu        65:64  0    4G  0 disk
└─sdu1     65:65  0    4G  0 part
sdv        65:80  0    4G  0 disk
└─sdv1     65:81  0    4G  0 part
sdw        65:96  0    4G  0 disk
└─sdw1     65:97  0    4G  0 part
Now create physical volumes on all 20 partitions (bash brace expansion sd{d..w}1 covers sdd1 through sdw1):
(root@vm-jupyterhub-01) (2022-06-30 13:00:31) [0]
~# pvcreate /dev/sd{d..w}1
Physical volume "/dev/sdd1" successfully created.
Physical volume "/dev/sde1" successfully created.
Physical volume "/dev/sdf1" successfully created.
Physical volume "/dev/sdg1" successfully created.
Physical volume "/dev/sdh1" successfully created.
Physical volume "/dev/sdi1" successfully created.
Physical volume "/dev/sdj1" successfully created.
Physical volume "/dev/sdk1" successfully created.
Physical volume "/dev/sdl1" successfully created.
Physical volume "/dev/sdm1" successfully created.
Physical volume "/dev/sdn1" successfully created.
Physical volume "/dev/sdo1" successfully created.
Physical volume "/dev/sdp1" successfully created.
Physical volume "/dev/sdq1" successfully created.
Physical volume "/dev/sdr1" successfully created.
Physical volume "/dev/sds1" successfully created.
Physical volume "/dev/sdt1" successfully created.
Physical volume "/dev/sdu1" successfully created.
Physical volume "/dev/sdv1" successfully created.
Physical volume "/dev/sdw1" successfully created.
List physical volumes:
(root@vm-jupyterhub-01) (2022-06-30 13:00:36) [0]
~# pvs
PV         VG Fmt  Attr PSize  PFree
/dev/sdd1     lvm2 ---  <4.00g <4.00g
/dev/sde1     lvm2 ---  <4.00g <4.00g
/dev/sdf1     lvm2 ---  <4.00g <4.00g
/dev/sdg1     lvm2 ---  <4.00g <4.00g
/dev/sdh1     lvm2 ---  <4.00g <4.00g
/dev/sdi1     lvm2 ---  <4.00g <4.00g
/dev/sdj1     lvm2 ---  <4.00g <4.00g
/dev/sdk1     lvm2 ---  <4.00g <4.00g
/dev/sdl1     lvm2 ---  <4.00g <4.00g
/dev/sdm1     lvm2 ---  <4.00g <4.00g
/dev/sdn1     lvm2 ---  <4.00g <4.00g
/dev/sdo1     lvm2 ---  <4.00g <4.00g
/dev/sdp1     lvm2 ---  <4.00g <4.00g
/dev/sdq1     lvm2 ---  <4.00g <4.00g
/dev/sdr1     lvm2 ---  <4.00g <4.00g
/dev/sds1     lvm2 ---  <4.00g <4.00g
/dev/sdt1     lvm2 ---  <4.00g <4.00g
/dev/sdu1     lvm2 ---  <4.00g <4.00g
/dev/sdv1     lvm2 ---  <4.00g <4.00g
/dev/sdw1     lvm2 ---  <4.00g <4.00g
(root@vm-jupyterhub-01) (2022-06-30 13:00:45) [0]
~#
Create volume group vg1 from all 20 physical volumes:
(root@vm-jupyterhub-01) (2022-06-30 13:00:45) [0]
~# vgcreate vg1 /dev/sd{d..w}1
Volume group "vg1" successfully created
(root@vm-jupyterhub-01) (2022-06-30 13:05:44) [0]
~#
List volume groups:
(root@vm-jupyterhub-01) (2022-06-30 13:05:44) [0]
~# vgs
VG  #PV #LV #SN Attr   VSize  VFree
vg1  20   0   0 wz--n- 79.92g 79.92g
Show details of vg1:
(root@vm-jupyterhub-01) (2022-06-30 13:06:04) [0]
~# vgdisplay -v vg1
--- Volume group ---
VG Name vg1
System ID
Format lvm2
Metadata Areas 20
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 20
Act PV 20
VG Size 79.92 GiB
PE Size 4.00 MiB
Total PE 20460
Alloc PE / Size 0 / 0
Free PE / Size 20460 / 79.92 GiB
VG UUID 5D41AY-mlR4-4IU5-kFtU-rpNO-UTmA-cvwc86
--- Physical volumes ---
PV Name /dev/sdd1
PV UUID UEPhXK-sOi6-153O-P12j-Yqbb-McKV-LGIlFp
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sde1
PV UUID 3WYqbm-vLU0-i00t-tO4C-fIk3-XbBd-GRZvYd
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdf1
PV UUID GexFYl-bTKS-xO6B-YXFg-dAqb-BRSl-kV0nxx
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdg1
PV UUID 2zs5Z4-ZRUe-sQPc-FelC-DRyv-WDc4-pIO0BI
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdh1
PV UUID GiG9dr-RW6U-DfN7-9McJ-eXMM-dWmW-84x6vY
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdi1
PV UUID eopmeT-YOH5-uSvK-5Uhl-9OPb-GWYK-qAwvxu
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdj1
PV UUID I5RviW-zmEb-GdiH-99Z2-yHme-asBS-0dcEiR
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdk1
PV UUID VrRsWs-tuah-KzaX-lxI9-meta-lsmF-3aJMLj
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdl1
PV UUID nTJ9mL-Pwji-IO7Z-Gl00-PGr2-dCSC-3ArUi2
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdm1
PV UUID 1C1uWS-2xNB-UZdD-e2Y4-bnmn-Ag3Y-hezVcN
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdn1
PV UUID aBsLPX-2Jeu-sMmv-lyVq-gZzk-zqIe-y3f3Ba
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdo1
PV UUID BYZMxo-eVK5-7Iyk-ezca-Luut-Qx9p-aAn5qo
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdp1
PV UUID kGzF0T-HYmd-ouJI-F91U-Q4dD-Arm4-piWA6P
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdq1
PV UUID UVEd13-kdxk-k211-cn9O-daTD-xQPc-h5Q5cZ
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdr1
PV UUID ydu0SL-x0GU-kpx8-i2wb-f04h-laWY-vy79jO
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sds1
PV UUID MfuG0S-e8Jm-dzA2-XymE-oNVl-UQ1B-ARqVXy
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdt1
PV UUID v3mLmC-20g0-qpOB-TZ9m-TaTd-86pv-cgvNKV
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdu1
PV UUID Mdokg0-Tpq6-DOxS-5jWf-1lBd-0LAr-Y0xhbX
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdv1
PV UUID Uu4Qn5-Knlf-dcqN-DZoz-ajzG-BZdY-Mkedyk
PV Status allocatable
Total PE / Free PE 1023 / 1023
PV Name /dev/sdw1
PV UUID tGtTMx-sYiM-M1vk-T4XT-UKAS-IUrd-hDOKgc
PV Status allocatable
Total PE / Free PE 1023 / 1023
Create a 78 GB logical volume named lv1 with 20 stripes and a 64 KiB stripe size:
(root@vm-jupyterhub-01) (2022-06-30 13:13:52) [0]
~# lvcreate -i 20 -I 64k -n lv1 -L 78G vg1
Rounding size 78.00 GiB (19968 extents) up to stripe boundary size <78.05 GiB (19980 extents).
Logical volume "lv1" created.
List logical volumes:
(root@vm-jupyterhub-01) (2022-06-30 13:14:40) [0]
~# lvs
LV   VG  Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
lv1  vg1 -wi-a----- <78.05g
Show details of lv1, including the stripe mapping (-m):
(root@vm-jupyterhub-01) (2022-06-30 13:14:41) [0]
~# lvdisplay -m /dev/vg1/lv1
--- Logical volume ---
LV Path /dev/vg1/lv1
LV Name lv1
VG Name vg1
LV UUID NyadSC-M6Vg-LIQ3-hutq-wKrQ-NrqA-JfdsCp
LV Write Access read/write
LV Creation host, time vm-jupyterhub-01, 2022-06-30 13:14:01 +0000
LV Status available
# open 0
LV Size <78.05 GiB
Current LE 19980
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 5120
Block device 253:0
--- Segments ---
Logical extents 0 to 19979:
Type striped
Stripes 20
Stripe size 64.00 KiB
Stripe 0:
Physical volume /dev/sdd1
Physical extents 0 to 998
Stripe 1:
Physical volume /dev/sde1
Physical extents 0 to 998
Stripe 2:
Physical volume /dev/sdf1
Physical extents 0 to 998
Stripe 3:
Physical volume /dev/sdg1
Physical extents 0 to 998
Stripe 4:
Physical volume /dev/sdh1
Physical extents 0 to 998
Stripe 5:
Physical volume /dev/sdi1
Physical extents 0 to 998
Stripe 6:
Physical volume /dev/sdj1
Physical extents 0 to 998
Stripe 7:
Physical volume /dev/sdk1
Physical extents 0 to 998
Stripe 8:
Physical volume /dev/sdl1
Physical extents 0 to 998
Stripe 9:
Physical volume /dev/sdm1
Physical extents 0 to 998
Stripe 10:
Physical volume /dev/sdn1
Physical extents 0 to 998
Stripe 11:
Physical volume /dev/sdo1
Physical extents 0 to 998
Stripe 12:
Physical volume /dev/sdp1
Physical extents 0 to 998
Stripe 13:
Physical volume /dev/sdq1
Physical extents 0 to 998
Stripe 14:
Physical volume /dev/sdr1
Physical extents 0 to 998
Stripe 15:
Physical volume /dev/sds1
Physical extents 0 to 998
Stripe 16:
Physical volume /dev/sdt1
Physical extents 0 to 998
Stripe 17:
Physical volume /dev/sdu1
Physical extents 0 to 998
Stripe 18:
Physical volume /dev/sdv1
Physical extents 0 to 998
Stripe 19:
Physical volume /dev/sdw1
Physical extents 0 to 998
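As a quicker cross-check than the full lvdisplay mapping, lvs --segments reports the per-segment striping (the #Str column is the stripe count):

~# lvs --segments vg1/lv1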
Inspect and then format the logical volume lv1:
(root@vm-jupyterhub-01) (2022-06-30 13:15:31) [0]
~# fdisk -l /dev/vg1/lv1
Disk /dev/vg1/lv1: 78.5 GiB, 83802193920 bytes, 163676160 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 65536 bytes / 1310720 bytes
(root@vm-jupyterhub-01) (2022-06-30 13:15:35) [0]
~# mkfs.ext4 -v -m0 /dev/vg1/lv1
mke2fs 1.45.5 (07-Jan-2020)
fs_types for mke2fs.conf resolution: 'ext4'
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=16 blocks, Stripe width=320 blocks
5120000 inodes, 20459520 blocks
0 blocks (0.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=2168455168
625 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Filesystem UUID: 04475cdc-d81d-4404-9e2e-0099249d4273
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000, 7962624, 11239424
Allocating group tables: done
Writing inode tables: done
Creating journal (131072 blocks): done
Writing superblocks and filesystem accounting information: done
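Note that both tools picked up the stripe geometry automatically: fdisk reported an optimal I/O size of 1310720 bytes (64 KiB stripe size × 20 stripes), and mke2fs derived Stride=16 blocks (64 KiB stripe / 4 KiB block size) and Stripe width=320 blocks (16 × 20) from it.
Before testing, mount the new filesystem; a minimal sketch, assuming the /data0 mount point used by the throughput test below:
~# mkdir -p /data0
~# mount /dev/vg1/lv1 /data0
For a mount that survives reboots, add a matching /etc/fstab entry such as: /dev/vg1/lv1 /data0 ext4 defaults 0 2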
Test write throughput:
(root@vm-jupyterhub-01) (2022-06-30 13:18:22) [0]
/data0# head -c 70G /dev/zero | pv > /data0/rand.dat
70.0GiB 0:02:19 [ 515MiB/s]
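For a rough read test, the same file can be streamed back through pv; a sketch that first drops the page cache so reads actually hit the disks:

~# sync
~# echo 3 > /proc/sys/vm/drop_caches
~# pv /data0/rand.dat > /dev/null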
And that's it: a 20-way striped logical volume sustaining roughly 515 MiB/s of sequential writes in this setup.