GPFS (IBM Spectrum Scale) on Linux

Under construction

Prerequisites

  vi /etc/hosts                    # add entries for both cluster nodes (see the sketch below)
  mount /dev/sr0 /mnt              # mount the install DVD used by the cdrom yum repo
  mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.old
  yum -y install ntp
  yum -y install gcc
  vi /etc/selinux/config           # set SELINUX=disabled (see the sketch below)
  systemctl stop firewalld
  systemctl disable firewalld
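
Not shown in the history are the edits behind the two vi commands. A minimal sketch, assuming the node names and addresses that mmlscluster reports further down:

  # /etc/hosts - both cluster nodes must resolve each other by name
  10.10.10.103  rhlabh1
  10.10.10.104  rhlabr1

  # /etc/selinux/config - this lab runs with SELinux off (takes effect after a reboot)
  SELINUX=disabled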

  df
  bootlist -m normal -o            # show the current normal-mode boot list
  multipath -ll                    # map the multipath devices to their sd* paths
  bootlist -m normal sda sdg       # set boot devices on both paths of the boot disk

  systemctl start ntpd
  systemctl enable ntpd

  cd rsct/
  ll
  rpm -Uhv src-3.2.1.3-16106.ppc64.rpm    # src must be installed before the rsct.core packages

  yum -y install ksh sg3_utils nfs-utils lshw net-tools telnet psmisc

  rpm -Uhv rsct.core-3.2.1.3-16106.ppc64.rpm rsct.core.utils-3.2.1.3-16106.ppc64.rpm
  rpm -Uhv devices.chrp.base.ServiceRM-2.5.1.1-16106.ppc64.rpm DynamicRM-2.0.3-1.ppc64.rpm
  rpm -Uhv nmon-16e-0.el7.ppc64.rpm
  rpm -Uhv ppc64-diag-2.6.10-1.el7.ppc64.rpm lsvpd-1.7.7-1.el7.ppc64.rpm libvpd-2.2.5-1.el7.ppc64.rpm
  bootlist -m normal -o            # confirm the boot list set earlier
  lssrc -a                         # the RSCT subsystems should now be listed
  ntpq -p                          # NTP peers should be reachable


  cd 4.2.1
  ./Spectrum_Scale_Protocols_Standard-4.2.1.1-ppc64-Linux-install
  cd /usr/lpp/mmfs/4.2.1.1/gpfs_rpms
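
The self-extracting archive prompts for license acceptance and unpacks under /usr/lpp/mmfs/4.2.1.1. For unattended runs the extractor can accept the license non-interactively; a sketch (the --silent flag is taken from the 4.2 installation documentation):

  ./Spectrum_Scale_Protocols_Standard-4.2.1.1-ppc64-Linux-install --silent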

  yum -y install m4                            # needed by the base GPFS packages
  rpm -Uhv gpfs.base-4.2.1-1.ppc64.rpm gpfs.docs-4.2.1-1.noarch.rpm gpfs.ext-4.2.1-1.ppc64.rpm gpfs.gpl-4.2.1-1.noarch.rpm gpfs.gskit-8.0.50-57.ppc64.rpm gpfs.msg.en_US-4.2.1-1.noarch.rpm
  yum -y install nc bzip2 postgresql-server    # prerequisites of the GUI packages
  rpm -Uhv gpfs.java-4.2.1-1.ppc64.rpm
  cd ../zimon_rpms
  yum -y install boost-regex                   # prerequisite of the zimon packages
[root@rhlabh1 zimon_rpms]# rpm -Uhv gpfs.gss.pmcollector-4.2.1-1.el7.ppc64.rpm gpfs.gss.pmsensors-4.2.1-1.el7.ppc64.rpm
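
Installing the sensor and collector rpms does not by itself wire up performance monitoring. The perfmon designation visible later in the mmlscluster output suggests this was configured once the cluster existed, presumably along these lines (a sketch, assuming rhlabh1 acts as the single collector):

  mmperfmon config generate --collectors rhlabh1
  mmchnode --perfmon -N rhlabh1,rhlabr1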
[root@rhlabh1 zimon_rpms]# cd ../gpfs_rpms
[root@rhlabh1 gpfs_rpms]# rpm -Uhv gpfs.gui-4.2.1-1.noarch.rpm
[root@rhlabh1 gpfs_rpms]# yum -y install kernel-devel gcc-c++ rpm-build
[root@rhlabh1 gpfs_rpms]# export LINUX_DISTRIBUTION=REDHAT_AS_LINUX
[root@rhlabh1 gpfs_rpms]# mmbuildgpl --build-package
[root@rhlabh1 gpfs_rpms]# rpm -Uhv /root/rpmbuild/RPMS/ppc64/gpfs.gplbin-3.10.0-327.el7.ppc64-4.2.1-1.ppc64.rpm
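
The gpfs.gplbin package is built against the running kernel (3.10.0-327.el7 here) and has to be rebuilt with mmbuildgpl after every kernel update. A quick consistency check:

  uname -r                     # must match the kernel version embedded in the gplbin package name
  rpm -qa | grep gpfs.gplbin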

# yum -y install libarchive       (run first; needed by the gpfs.smb package below)
rpm -Uhv smb_rpms/gpfs.smb-4.3.11_gpfs_21-3.el7.ppc64.rpm

# yum -y install libwbclient      (run first, before the ganesha packages)
cd /usr/lpp/mmfs/4.2.1.1/ganesha_rpms

rpm -Uhv nfs-ganesha-gpfs-2.3.2-0.ibm24.el7.ppc64.rpm nfs-ganesha-2.3.2-0.ibm24.el7.ppc64.rpm nfs-ganesha-utils-2.3.2-0.ibm24.el7.ppc64.rpm



# yum -y install gpfs.protocols-support

[root@rhlabh1 yum.repos.d]# cat cdrom.repo
[cdrom]
name=CDROM Repo
baseurl=file:///mnt
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[root@rhlabh1 yum.repos.d]# cat gpfs.repo
[gpfs_base]
name=GPFS_base
baseurl=file:///usr/lpp/mmfs/4.2.1.1/gpfs_rpms/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[gpfs_obj]
name=GPFS_obj
baseurl=file:///usr/lpp/mmfs/4.2.1.1/object_rpms/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[gpfs_nfs]
name=GPFS_nfs
baseurl=file:///usr/lpp/mmfs/4.2.1.1/ganesha_rpms/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[gpfs_smb]
name=GPFS_smb
baseurl=file:///usr/lpp/mmfs/4.2.1.1/smb_rpms/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

[gpfs_zimon]
name=GPFS_zimon
baseurl=file:///usr/lpp/mmfs/4.2.1.1/zimon_rpms/
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release


# yum -y install createrepo
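
createrepo is what turns the directories above into usable file:// repositories; presumably it was then run against each RPM directory, along these lines (a sketch):

  for d in gpfs_rpms object_rpms ganesha_rpms smb_rpms zimon_rpms; do
      createrepo /usr/lpp/mmfs/4.2.1.1/$d
  done
  yum repolist    # the gpfs_* repos should now report package counts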
[root@rhlabh1 ~]# cat nsd_disk.lst
%nsd:
device=/dev/dm-11
nsd=nsd_t1_h
servers=rhlabh1,rhlabr1
usage=dataAndMetadata
failureGroup=1
pool=system

%nsd:
device=/dev/dm-10
nsd=nsd_t2_h
servers=rhlabh1,rhlabr1
usage=dataOnly
failureGroup=1
pool=slow

%nsd:
device=/dev/dm-12
nsd=nsd_t1_r
servers=rhlabh1,rhlabr1
usage=dataAndMetadata
failureGroup=2
pool=system

%nsd:
device=/dev/dm-13
nsd=nsd_t2_r
servers=rhlabh1,rhlabr1
usage=dataOnly
failureGroup=2
pool=slow
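
One step is missing from this history: before mmcrfs can reference the NSD names, the NSDs themselves must be created from the stanza file with mmcrnsd (the same file serves both commands):

  mmcrnsd -F nsd_disk.lst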
 
[root@rhlabh1 ~]# mmcrfs gpfs1 -F nsd_disk.lst -k nfs4 -T /gpfs1 -m 2 -M 2 -r 2 -R 2

The following disks of gpfs1 will be formatted on node rhlabr1:
    nsd_t1_h: size 30720 MB
    nsd_t2_h: size 30720 MB
    nsd_t1_r: size 30720 MB
    nsd_t2_r: size 30720 MB
Formatting file system ...
Disks up to size 280 GB can be added to storage pool system.
Disks up to size 280 GB can be added to storage pool slow.
Creating Inode File
Creating Allocation Maps
Creating Log Files
Clearing Inode Allocation Map
Clearing Block Allocation Map
Formatting Allocation Map for storage pool system
Formatting Allocation Map for storage pool slow
Completed creation of file system /dev/gpfs1.
mmcrfs: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.

[root@rhlabh1 ~]# mmlsfs all

File system attributes for /dev/gpfs1:
======================================
flag                value                    description
------------------- ------------------------ -----------------------------------
 -f                 8192                     Minimum fragment size in bytes
 -i                 4096                     Inode size in bytes
 -I                 16384                    Indirect block size in bytes
 -m                 2                        Default number of metadata replicas
 -M                 2                        Maximum number of metadata replicas
 -r                 2                        Default number of data replicas
 -R                 2                        Maximum number of data replicas
 -j                 cluster                  Block allocation type
 -D                 nfs4                     File locking semantics in effect
 -k                 nfs4                     ACL semantics in effect
 -n                 32                       Estimated number of nodes that will mount file system
 -B                 262144                   Block size
 -Q                 none                     Quotas accounting enabled
                    none                     Quotas enforced
                    none                     Default quotas enabled
 --perfileset-quota No                       Per-fileset quota enforcement
 --filesetdf        No                       Fileset df enabled?
 -V                 15.01 (4.2.0.0)          File system version
 --create-time      Fri Sep 30 16:11:20 2016 File system creation time
 -z                 No                       Is DMAPI enabled?
 -L                 33554432                 Logfile size
 -E                 Yes                      Exact mtime mount option
 -S                 No                       Suppress atime mount option
 -K                 whenpossible             Strict replica allocation option
 --fastea           Yes                      Fast external attributes enabled?
 --encryption       No                       Encryption enabled?
 --inode-limit      123008                   Maximum number of inodes
 --log-replicas     0                        Number of log replicas
 --is4KAligned      Yes                      is4KAligned?
 --rapid-repair     Yes                      rapidRepair enabled?
 --write-cache-threshold 0                   HAWC Threshold (max 65536)
 -P                 system;slow              Disk storage pools in file system
 -d                 nsd_t1_h;nsd_t2_h;nsd_t1_r;nsd_t2_r  Disks in file system
 -A                 yes                      Automatic mount option
 -o                 none                     Additional mount options
 -T                 /gpfs1                   Default mount point
 --mount-priority   0                        Mount priority

[root@rhlabh1 ~]# mmlsfs all -d

File system attributes for /dev/gpfs1:
======================================
flag                value                    description
------------------- ------------------------ -----------------------------------
 -d                 nsd_t1_h;nsd_t2_h;nsd_t1_r;nsd_t2_r  Disks in file system

[root@rhlabh1 ~]# mmmount all
Fri Sep 30 16:16:04 CEST 2016: mmmount: Mounting file systems ...

[root@rhlabh1 ~]# mmlsdisk /dev/gpfs1
disk         driver   sector     failure holds    holds                            storage
name         type       size       group metadata data  status        availability pool
------------ -------- ------ ----------- -------- ----- ------------- ------------ ------------
nsd_t1_h     nsd         512           1 Yes      Yes   ready         up           system
nsd_t2_h     nsd         512           1 No       Yes   ready         up           slow
nsd_t1_r     nsd         512           2 Yes      Yes   ready         up           system
nsd_t2_r     nsd         512           2 No       Yes   ready         up           slow
[root@rhlabh1 ~]# mmlsdisk /dev/gpfs1 -M

Disk name     IO performed on node     Device             Availability
------------  -----------------------  -----------------  ------------
nsd_t1_h      localhost                /dev/dm-11         up
nsd_t2_h      localhost                /dev/dm-10         up
nsd_t1_r      localhost                /dev/dm-12         up
nsd_t2_r      localhost                /dev/dm-13         up

[root@rhlabh1 ~]# df
Filesystem                  1K-blocks    Used Available Use% Mounted on
/dev/mapper/ca_rhlabh1-root  23089088 4019816  19069272  18% /
devtmpfs                      1778752       0   1778752   0% /dev
tmpfs                         1815168       0   1815168   0% /dev/shm
tmpfs                         1815168   81344   1733824   5% /run
tmpfs                         1815168       0   1815168   0% /sys/fs/cgroup
/dev/mapper/mpatha2            508588  173280    335308  35% /boot
tmpfs                          363072       0    363072   0% /run/user/0
gpfs1                       125829120 3387392 122441728   3% /gpfs1
[root@rhlabh1 ~]# mmdf /dev/gpfs1
disk                disk size  failure holds    holds              free KB             free KB
name                    in KB    group metadata data        in full blocks        in fragments
--------------- ------------- -------- -------- ----- -------------------- -------------------
Disks in storage pool: system (Maximum disk size allowed is 261 GB)
nsd_t1_h             31457280        1 Yes      Yes        29829632 ( 95%)           584 ( 0%)
nsd_t1_r             31457280        2 Yes      Yes        29829632 ( 95%)           584 ( 0%)
                -------------                         -------------------- -------------------
(pool total)         62914560                              59659264 ( 95%)          1168 ( 0%)

Disks in storage pool: slow (Maximum disk size allowed is 261 GB)
nsd_t2_h             31457280        1 No       Yes        31391232 (100%)           376 ( 0%)
nsd_t2_r             31457280        2 No       Yes        31391232 (100%)           376 ( 0%)
                -------------                         -------------------- -------------------
(pool total)         62914560                              62782464 (100%)           752 ( 0%)

                =============                         ==================== ===================
(data)              125829120                             122441728 ( 97%)          1920 ( 0%)
(metadata)           62914560                              59659264 ( 95%)          1168 ( 0%)
                =============                         ==================== ===================
(total)             125829120                             122441728 ( 97%)          1920 ( 0%)

Inode Information
-----------------
Number of used inodes:            4038
Number of free inodes:          118970
Number of allocated inodes:     123008
Maximum number of inodes:       123008


[root@rhlabh1 gpfs]# mmlsconfig
Configuration data for cluster gpfs_test.rhlabh1:
-----------------------------------------------------
clusterName gpfs_test.rhlabh1
clusterId 9668046452208786064
autoload yes
profile gpfsProtocolDefaults
dmapiFileHandleSize 32
minReleaseLevel 4.2.1.0
ccrEnabled yes
cipherList AUTHONLY
maxblocksize 16M
[cesNodes]
maxMBpS 5000
numaMemoryInterleave yes
enforceFilesetQuotaOnRoot yes
workerThreads 512
[common]
tscCmdPortRange 60000-61000
adminMode central

File systems in cluster gpfs_test.rhlabh1:
----------------------------------------------
/dev/gpfs1

Creation of a file system, fileset, or path for a CES shared root (and of an object fileset): the installation toolkit uses a shared root storage area to install the protocols on each node. The NFS and object protocols also use this storage to maintain system data associated with the cluster integration. It can be a subdirectory of an existing file system or a file system of its own; once this option is set, changing it requires a restart of GPFS. The installation toolkit can set up the CES shared root storage area for you if you use the toolkit for GPFS installation and file system creation (see "Using the installation toolkit to perform installation tasks: Explanations and examples" in the IBM documentation). To set up shared root before launching the toolkit:

1. Create a file system or fileset for shared root; it must be at least 4 GB.
2. Run: mmchconfig cesSharedRoot=path_to_the_filesystem/fileset_created_in_step_1

For Object, the installation toolkit creates an independent fileset in the GPFS file system that you name.

[root@rhlabh1 gpfs]# mmchconfig cesSharedRoot=/gpfs1
mmchconfig: Command successfully completed
mmchconfig: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.
[root@rhlabh1 gpfs]#  mmlsconfig
Configuration data for cluster gpfs_test.rhlabh1:
-----------------------------------------------------
clusterName gpfs_test.rhlabh1
clusterId 9668046452208786064
autoload yes
profile gpfsProtocolDefaults
dmapiFileHandleSize 32
minReleaseLevel 4.2.1.0
ccrEnabled yes
cipherList AUTHONLY
maxblocksize 16M
[cesNodes]
maxMBpS 5000
numaMemoryInterleave yes
enforceFilesetQuotaOnRoot yes
workerThreads 512
[common]
tscCmdPortRange 60000-61000
cesSharedRoot /gpfs1
adminMode central

File systems in cluster gpfs_test.rhlabh1:
----------------------------------------------
/dev/gpfs1
[root@rhlabh1 gpfs]# mmchnode --ces-enable -N rhlabh1,rhlabr1
Fri Sep 30 17:12:30 CEST 2016: mmchnode: Processing node rhlabr1
Fri Sep 30 17:12:50 CEST 2016: mmchnode: Processing node rhlabh1
mmchnode: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.

[root@rhlabh1 gpfs]# mmlscluster

GPFS cluster information
========================
  GPFS cluster name:         gpfs_test.rhlabh1
  GPFS cluster id:           9668046452208786064
  GPFS UID domain:           gpfs_test.rhlabh1
  Remote shell command:      /usr/bin/ssh
  Remote file copy command:  /usr/bin/scp
  Repository type:           CCR

 Node  Daemon node name  IP address     Admin node name  Designation
---------------------------------------------------------------------
   1   rhlabh1           10.10.10.103  rhlabh1          quorum-manager-perfmon
   2   rhlabr1           10.10.10.104  rhlabr1          quorum-manager-perfmon

[root@rhlabr1 /]# mmlscluster --ces

GPFS cluster information
========================
  GPFS cluster name:         gpfs_test.rhlabh1
  GPFS cluster id:           9668046452208786064

Cluster Export Services global parameters
-----------------------------------------
  Shared root directory:                /gpfs1
  Enabled Services:                     None
  Log level:                            0
  Address distribution policy:          even-coverage

 Node  Daemon node name            IP address       CES IP address list
-----------------------------------------------------------------------
   1   rhlabh1                     10.10.10.103    None
   2   rhlabr1                     10.10.10.104    None

   
[root@rhlabh1 gpfs]# mmces service enable NFS
rhlabr1:  Redirecting to /bin/systemctl start  rpcbind.service
rhlabr1:  Redirecting to /bin/systemctl start  nfs-ganesha.service
rhlabh1:  Redirecting to /bin/systemctl start  rpcbind.service
rhlabh1:  Redirecting to /bin/systemctl start  nfs-ganesha.service
mmchconfig: Command successfully completed
mmchconfig: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.

[root@rhlabh1 gpfs]# mmces service enable SMB
rhlabr1:  Redirecting to /bin/systemctl start  gpfs-ctdb.service
rhlabh1:  Redirecting to /bin/systemctl start  gpfs-ctdb.service
rhlabr1:  Wait for ctdb to become ready. State=STARTUP
rhlabh1:  Wait for ctdb to become ready. State=STARTUP
rhlabr1:  Wait for ctdb to become ready. State=STARTUP
rhlabh1:  Wait for ctdb to become ready. State=STARTUP
rhlabr1:  mmchconfig: Command successfully completed
rhlabh1:  mmchconfig: Unable to obtain the GPFS configuration file lock. Retrying ...
rhlabr1:  mmchconfig: Propagating the cluster configuration data to all
rhlabr1:    affected nodes.  This is an asynchronous process.
rhlabr1:  Redirecting to /bin/systemctl start  gpfs-smb.service
rhlabh1:  mmchconfig: Lock creation successful.
rhlabh1:  mmchconfig: Command successfully completed
rhlabh1:  mmchconfig: Propagating the cluster configuration data to all
rhlabh1:    affected nodes.  This is an asynchronous process.
rhlabh1:  Redirecting to /bin/systemctl start  gpfs-smb.service
mmchconfig: Command successfully completed
mmchconfig: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.

  
   
 
[root@rhlabr1 /]# mmlscluster --ces

GPFS cluster information
========================
  GPFS cluster name:         gpfs_test.rhlabh1
  GPFS cluster id:           9668046452208786064

Cluster Export Services global parameters
-----------------------------------------
  Shared root directory:                /gpfs1
  Enabled Services:                     NFS SMB
  Log level:                            0
  Address distribution policy:          even-coverage

 Node  Daemon node name            IP address       CES IP address list
-----------------------------------------------------------------------
   1   rhlabh1                     10.10.10.103    None
   2   rhlabr1                     10.10.10.104    None
[root@rhlabh1 gpfs]# mmces service list --all
Enabled services: NFS SMB
rhlabr1:  NFS is running, SMB is running
rhlabh1:  NFS is running, SMB is running
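
Note that the CES IP address list is still None on both nodes; protocol clients normally connect to floating CES addresses rather than the daemon addresses. Assigning one would look like the following (a sketch; the address itself is hypothetical):

  mmces address add --ces-ip 10.10.10.110
  mmces address list           # confirm the address was distributed to a node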



[root@rhlabh1 gpfs]# mmnfs export add /gpfs1
mmcesfuncs.sh: Current authentication: none is invalid.
This operation can not be completed without correct Authentication configuration.
Configure authentication using:   mmuserauth
mmnfs export add: Command failed. Examine previous error messages to determine cause.
[root@rhlabh1 gpfs]#  mmnfs export list
mmnfs export list [E]: Cannot list any exports. There are no exports to show.
[root@rhlabh1 gpfs]# mmuserauth
Usage:
        mmuserauth service              Configure the system authentication service
[root@rhlabh1 gpfs]# mmuserauth service
Usage:
        mmuserauth service create       Create new authentication configuration
        mmuserauth service check        Check authentication configuration and correct if needed
        mmuserauth service list         List current authentication configuration
        mmuserauth service remove       Remove current authentication configuration
[root@rhlabh1 gpfs]# mmuserauth service list
FILE access not configured
PARAMETERS               VALUES
-------------------------------------------------

OBJECT access not configured
PARAMETERS               VALUES
-------------------------------------------------
[root@rhlabh1 gpfs]#  mmuserauth service create --data-access-method file --type userdefined
File authentication configuration completed successfully.
[root@rhlabh1 gpfs]# mmuserauth service list
FILE access configuration : USERDEFINED
PARAMETERS               VALUES
-------------------------------------------------

OBJECT access not configured
PARAMETERS               VALUES
-------------------------------------------------
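
With --type userdefined, authentication and ID mapping are left entirely to the administrator: every UID and GID must resolve identically on all protocol nodes. A minimal sketch (user name and UID are hypothetical):

  useradd -u 2001 backupusr    # run with the same UID on rhlabh1 and rhlabr1
  id backupusr                 # verify the mapping matches on every node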

[root@rhlabh1 gpfs]#  mmnfs export add '/gpfs01/backupdb' -c '10.1.0.0/16(Access_Type=RW,squash=root_squash,protocols=3:4)' 
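
As a hedged client-side check, the export could then be mounted from a host inside 10.1.0.0/16 (the server address below stands for a CES or daemon IP and is an assumption; note that this export path belongs to a second cluster, gpfs01, which the remaining examples use):

  mount -t nfs4 <ces-ip>:/gpfs01/backupdb /mnt/backupdb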


[root@gpfs01 scripts]# mmchdisk gpfs01lv change -d "GPFS_NSD_META01:::metadataOnly"
Attention: Disk parameters were changed.
  Use the mmrestripefs command with the -r option to relocate data and metadata.
Verifying file system configuration information ...
mmchdisk: Propagating the cluster configuration data to all
  affected nodes.  This is an asynchronous process.
[root@gpfs01 scripts]# mmrestripefs gpfs01lv -r
Scanning file system metadata, phase 1 ...
Scan completed successfully.
Scanning file system metadata, phase 2 ...
Scan completed successfully.
Scanning file system metadata, phase 3 ...
Scan completed successfully.
Scanning file system metadata, phase 4 ...
Scan completed successfully.
Scanning user file metadata ...
 100.00 % complete on Thu Apr 26 15:02:24 2018  (    606208 inodes with total       3879 MB data processed)
Scan completed successfully.
[root@gpfs01 scripts]# mmlsdisk gpfs01lv
disk         driver   sector     failure holds    holds                            storage
name         type       size       group metadata data  status        availability pool
------------ -------- ------ ----------- -------- ----- ------------- ------------ ------------
GPFS_NSD_DATA01 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_DATA02 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_DATA03 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_DATA04 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_DATA05 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_DATA06 nsd         512          -1 No       Yes   ready         up           system
GPFS_NSD_META01 nsd         512          -1 Yes      No    ready         up           system
GPFS_NSD_META02 nsd         512          -1 Yes      No    ready         up           system
GPFS_NSD_META03 nsd         512          -1 Yes      No    ready         up           system
GPFS_NSD_META04 nsd         512          -1 Yes      No    ready         up           system
[root@gpfs01 scripts]#
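
A follow-up check not captured in the transcript: mmdf reports the free-space split per NSD, which after the restripe should reflect the new metadataOnly/dataOnly roles:

  mmdf gpfs01lv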