In this example, RPM is used for installation.
To install from source, you must install GCC. For more information, see the GCC installation documentation.
See the Lustre file system configuration documentation.
Note: The source RPM must be installed. Otherwise, the libcfs module cannot be found during LMC configuration.
# Install the Lustre-patched kernel required for Lustre
rpm -ivh kernel-lustre-smp-2.6.9-42.EL_lustre.1.5.95.i686.rpm
rpm -ivh kernel-lustre-source-2.6.9-42.EL_lustre.1.5.95.i686.rpm
# Edit /etc/grub.conf
# Comment out the original kernel boot entry and boot with the Lustre kernel.
title Red Hat Enterprise Linux AS (2.6.9-42.0.2.EL_lustre.1.4.7.1smp)
        root (hd0,0)
        kernel /boot/vmlinuz-2.6.9-42.0.2.EL_lustre.1.4.7.1smp ro root=LABEL=/1 rhgb quiet
        initrd /boot/initrd-2.6.9-42.0.2.EL_lustre.1.4.7.1smp.img
# title Red Hat Enterprise Linux AS (2.6.9-42.EL)
#         root (hd0,0)
#         kernel /boot/vmlinuz-2.6.9-42.EL ro root=LABEL=/1 rhgb quiet
#         initrd /boot/initrd-2.6.9-42.EL.img
# Restart the system. Run the uname -r command to check whether the Lustre kernel is running.
# Install the Lustre file system
rpm -ivh lustre-1.5.95-2.6.9_42.EL_lustre.1.5.95smp.i686.rpm
rpm -ivh lustre-modules-1.5.95-2.6.9_42.EL_lustre.1.5.95smp.i686.rpm
rpm -ivh tcl-8.4.7-2.i386.rpm
rpm -ivh expect-5.42.1-1.i386.rpm
rpm -ivh lustre-source-1.5.95-2.6.9_42.EL_lustre.1.5.95smp.i686.rpm
rpm -ivh lustre-debuginfo-1.5.95-2.6.9_42.EL_lustre.1.5.95smp.i686.rpm
Modify /etc/modprobe.conf and add the following content:
# Networking options, see /sys/module/lnet/parameters
options lnet networks=tcp
# alias lustre llite -- remove this line from an existing modprobe.conf
# (the llite module has been renamed to lustre)
# end Lustre modules
The installation is complete.
Lustre 1.6 adopts the new configuration method, mountconf.
Lustre 1.6 is configured through mkfs.lustre and mount.lustre; lconf, lmc, and XML are no longer used.
# Note that the following configuration places MGS, OST, and client on the same host
# Prepare the partitions /dev/sdb1 and /dev/sdb2 in advance
# Configure MDT/MGS
mkfs.lustre --fsname=testfs --mdt --mgs /dev/sdb1
mkdir -p /mnt/test/MDT
mount -t lustre /dev/sdb1 /mnt/test/MDT
cat /proc/fs/lustre/devices
If the configuration is correct, output similar to the following is displayed:
[root@client ~]# cat /proc/fs/lustre/devices
0 UP mgs MGS MGS 5
1 UP mgc MGC172.20.11.238@tcp acfea366-ced8-76d0-8cd3-510ddb2a030a 5
2 UP mdt MDS MDS_uuid 3
3 UP lov testfs-mdtlov testfs-mdtlov_UUID 4
4 UP mds testfs-MDT0000 testfs-MDT0000_UUID 3
# Configure the OST
# Note that the --device-size parameter must be specified here (MGS and OST on the same host; other cases not tested)
mkfs.lustre --fsname=testfs --ost --mgsnode=lc2@tcp0 /dev/sdb2
# The --fsname parameter must match the MDT's. On the same host, --device-size must be specified and -o loop used for mount.
# --mgsnode=<MGS hostname>@tcp
mkdir -p /mnt/test/ost0
mount -t lustre /dev/sdb2 /mnt/test/ost0
# Configure the client
mkdir -p /mnt/client
mount -t lustre lc2@tcp0:/testfs /mnt/client
# Client test
cd /mnt/client
mkdir
ls -l /mnt/client
# Output similar to the following is displayed:
[root@client client]# ls -l /mnt/client
total 4
drwxr-xr-x 2 root root 4096 Nov 18
# View partitions
df
Filesystem           1K-blocks      Used Available Use% Mounted on
/dev/mapper/VolGroup00-LogVol00
                       1515856   1393608     45244  97% /
/dev/sda1               101086     16072     79795  17% /boot
none                    127516         0    127516   0% /dev/shm
//172.20.11.201/linux
                      81923432  14427968  67495464  18% /mnt/SMB
/dev/sdb1               281064     16676    248328   7% /mnt/test/MDT
/dev/loop0              301172     16784    268324   6% /mnt/test/ost0
172.20.11.238@tcp:/testfs
                        301172     16784    268260   6% /mnt/client