Red Hat Server Admin - Advanced

Author: Jackson Chen


# write simple bash scripts
#!  "shebang" (sh-bang) line at the top of the script
# identify which path the "hello" command runs from
which hello
which passwd
echo $PATH

cd ~  or cd # change to login user home directory
mkdir bin
cp hello bin/   # copy "hello" to ~/bin; ~/bin in the user's home directory is on the default PATH

echo \# not a comment   # special character
echo '# not a comment'      # '' prevent variable expansion

var=$(hostname -s) # create variable "var" (no spaces around "=")
echo $var
echo ${var} # if variable right next to other text      Note: same as $var

echo \${var}    # or: echo '${var}'     # both print the literal ${var} without expansion


# without making the script executable, run the script
sh <scriptname>
bash <scriptname>   # or make it executable with: chmod +x <scriptfile>


#******* bash for loop
for variable in list; do
    # body of loop
done
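
# A minimal illustration of the loop above (host names are placeholders, not from the course):
for host in servera serverb serverc; do
    echo "checking ${host}"
done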

echo host{1,2,3}    # brace expansion; no spaces allowed inside the braces

seq 2 2 10  # count from 2 to 10 in steps of 2 (even numbers)

echo $?     # exit code $?  if 0, successful

# testing script inputs 
test 1 -gt 0; echo $?   # "test"    man test

#******* if/then
if <condition>; then
    <statement>
fi

# if/then/elif/then/else
if <condition>; then
    <statement>
elif <condition>; then
    <statement>
else
    <statement>
fi
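
# A short illustrative example (the COUNT threshold values are hypothetical):
COUNT=$(ps aux | wc -l)
if [ "$COUNT" -gt 200 ]; then
    echo "busy system"
elif [ "$COUNT" -gt 100 ]; then
    echo "moderate load"
else
    echo "quiet system"
fi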


# ************** regular expression
matching text in command output with regular expressions

grep '^cat' /<dir>  # starts with cat
grep 'cat$' /<dir>  # end with cat
grep '^cat$' /<dir> # exact match
grep '^c.t$' /dir   # "." single character
grep '^c[aou]t$' /dir   # c, then a, o, or u, then t
grep 'c.*t$'        # c then 0 or more characters, then t  "*" is modifier in regular expression
grep 'c.\{2,3\}t' file  # c, then 2 or 3 of any character, then t

    ?   zero or one
    *   zero or more
    +   one or more
    {n,m}   min n, max m
    [[:alpha:]]   any alphabetic character (POSIX character class)
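
# Illustrative only: "+", "?" and "{n,m}" are extended regex operators, so use grep -E (or backslash-escape them in basic grep)
grep -E 'ca+t' file     # c, one or more "a", then t
grep -E 'ca{1,3}t' file # c, then 1 to 3 "a" characters, then t
grep '[[:alpha:]]' file # lines containing any alphabetic character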

grep '^computer' /dir
ps aux | grep chrony

# grep
    -i  case insensitive
    -r  recursive
    
grep -e 'lm' -e 'nx' /proc/cpuinfo

grep -vE '^#|^$' file   # exclude comment lines (^#) and blank lines (^$)

grep -v '^[#;]' /etc/<file> # lines that do not start with # or ;

# the "-e" option allows grep to search for more than one regular expression at a time   <- -e  expression
grep -e 'pam_unix' -e 'user root' -e 'Accepted publickey' /<dir>/file  | less

grep fail /var/log/boot.log

    /dev.*s     # search for dev then 0 or more character, and s

grep '^<Date>.*GID' /var/log/secure

yum history
rpm -q --scripts <packagename> | grep add   # query the scripts that run when the package is installed


grep -E 'qmgr|pickup|cleanup' /etc/postfix/master.cf


#*************** improve command line productivity
mkdir -p ~/bin  # create the ~/bin directory (e.g. /home/<user>/bin)

ssh student@${HOST} "hostname -f" > /home/student/output-${HOST}        
        # the redirection happens locally, so the file is created on the workstation rather than on the server
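
# The same command can be wrapped in a for loop to collect output from several hosts; a sketch with placeholder host names:
for HOST in servera serverb; do
    ssh student@${HOST} "hostname -f" > /home/student/output-${HOST}
done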


#*************** schedule future tasks
cron job    # schedule job
at  # one time job

now+5min    now +5min   # same result

at TIMESPEC
Ctrl+d  # end input and submit the job

# use an at job to restore a known-good iptables/firewall configuration
    # this protects against a firewall change that locks you out of the computer, unless you have console access


atq # query at queue

atrm <job num>  # remove the job

watch atq   # watch re-runs the command every 2 seconds

at -q g teatime     # queue the job in queue "g" at teatime (16:00)
at -c 9     # show the contents of job number 9 (note the space between "-c" and "9")

# create at job
at -q b 16:05   # time
at> command or task to run  # example: echo "testing" >> /tmp/test.txt
Ctrl+d  # to finish entering the job

# cron job - repeat job
crontab -l  # list jobs for the current user
    -r  # remove all jobs for the current user
    -e  # edit the current user's crontab


ls /var/spool/cron  # show cron jobs


cat /etc/crontab    # example

* 15 * * * echo "test"      # runs every minute during hour 15; save it and it appears with crontab -l

*/10 15-16 * * sun,mon  echo "test"     # runs every 10 minutes between 15:00 and 16:59, on Sunday and Monday


crontab -l  # list the cron job

tail -f /var/log/cron   # watch the cron job run

mon-fri  or   1-5   # same 


# ********* system cron job
cat /etc/crontab
    * 15 * * * root <command to be executed>    # the system crontab includes a user field

# now
cd /etc/cron.d      # it is now standard, same format as crontab

ls /etc/cron.*      # directories whose scripts are run on schedule
    /etc/cron.d:
    /etc/cron.daily:
    /etc/cron.monthly:
    /etc/cron.weekly:

cat /etc/anacrontab
        # fallback mechanism for the daily, weekly, and monthly cron jobs
        # if the machine was powered off when a job was due, anacron runs the missed job after the machine turns on

ls /var/spool/anacron   # there are files
    cron.daily
    cron.monthly
    cron.weekly

#***** system timer unit
systemctl --type timer      # show timer unit

# Do NOT modify files located at
/usr/lib/systemd/system     # these files are overwritten on update/upgrade
/etc/systemd/system         # modify here: copy files from the directory above into here. Files here take precedence!!!

systemctl daemon-reload # reload after making changes

ls *.timer  


yum list sysstat    # last metadata expiration check

rpm -ql sysstat | grep timer

# example
cp /usr/lib/systemd/system/sysstat-collector.timer /etc/systemd/system
vi /etc/systemd/system/sysstat-collector.timer
systemctl enable --now sysstat-collector.timer
systemctl daemon-reload     # reload daemon after making changes, then wait for changes to take effect
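
# The part you normally edit in the copied unit is the [Timer] section; a rough sketch (the OnCalendar value is illustrative, not the value shipped with sysstat):
[Timer]
OnCalendar=*:00/05      # trigger every 5 minutes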


watch ls /var/log/sa    # watch for new sysstat data files appearing

rpm -qd sysstat     # query documentation; sysstat provides system baseline information

sar -A -f <file>    # analyze the collected data file

#****** temporary files
/tmp

systemd timer will cleanup /tmp files

systemd-tmpfiles --create
systemd-tmpfiles --remove

/etc/tmpfiles.d/*.conf      <-------- highest precedence
/run/tmpfiles.d/*.conf      <-----------lower precedence
/usr/lib/tmpfiles.d/*.conf  <-------------lowest precedence

Note:
cp /usr/lib/tmpfiles.d/tmp.conf /etc/tmpfiles.d/tmp.conf
vi /etc/tmpfiles.d/tmp.conf # change the age after which tmp files are deleted
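
# An entry in the copied tmp.conf follows the tmpfiles.d format (type, path, mode, user, group, age); the age value below is illustrative:
q /tmp 1777 root root 5d    # clean files in /tmp that have not been used for 5 days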


systemd-tmpfiles --clean

systemctl enable --now systemd-tmpfiles-clean.timer

man tmpfiles.d  # how to set it up manually

systemd-tmpfiles --clean /etc/tmpfiles.d/tmp.conf   


# which command displays all the user jobs that are currently scheduled to run as deferred jobs
Answer: atq

Which command removes the deferred user job that has job number 5?
Answer: atrm 5

Which command displays all the recurring user jobs scheduled for the currently logged-in user?
Answer: crontab -l

Which job format executes /usr/local/bin/daily_backup hourly from 9am to 6pm on all days from Monday through Friday?
00 09-18 * * Mon-Fri /usr/local/bin/daily_backup

which directory contains the shell scripts intended to run on a daily basis?
/etc/cron.daily

Which configuration file defines the settings for the system jobs that run on a daily, weekly, and monthly basis?
Answer: /etc/anacrontab

Which systemd unit regularly triggers the cleanup of the temporary files
Answer: systemd-tmpfiles-clean.timer


# *************** Tuning system performance
yum install tuned   # configure static and dynamic system tuning
systemctl enable --now tuned

# tuning profiles   # similar to Windows power plans
virtual-guest   # virtual machine

tuned-adm recommend 
tuned-adm profile <profile-name>    # throughput-performance, virtual-guest, etc

tuned-adm off
tuned-adm active


tuned-adm list  # list all the available profiles

tuned-adm profile powersave
tuned-adm active    # show the active profile

#************* influencing process scheduling
/tmp/nice.txt

nice values range from -20 (least nice, highest priority) to 19 (nicest, lowest priority); the default is 0 (neutral)

ps axo pid,comm,nice,cls --sort=-nice

top     # NI    nice value
    # PR    priority

sha1sum /dev/zero &

nice sha1sum /dev/zero &    # it will have NI 10

nice -n 15 sha1sum &    # start with NI 15

renice -n 5 <pid>   # e.g. renice -n 5 1986

top
    r   # renice

grep -c '^processor' /proc/cpuinfo

jobs    # check running jobs
ps u $(pgrep sha1sum)   # list process for the required process

top
    b   # toggle bold/highlighting
    x   # highlight the sort column
    r   # renice a process

nice -n 10 sha1sum /dev/zero &

ps aux --sort=pcpu | head

#****************************** control access file via ACL
setfacl -m g::perms file    # update the file's group owner permissions
getfacl <filename>

getfacl <directory>     # verify directory ACL

mask::rw-   # the ACL mask limits the maximum effective permissions for named users, named groups, and the group owner
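# A sketch of setting the mask explicitly with setfacl (file name is a placeholder):
setfacl -m m::rw <file>     # set the ACL mask entry to rw-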

setfacl --set-file=<acl-file> file1     # apply the ACL entries listed in <acl-file> to file1

ls -ld /var/www/html

/run/log/journal/

ls -l /run/log/journal

getfacl /<dir>

getfacl /dev/sr0


# Interpreting file ACLs
Display the ACL on a directory      ->  getfacl /directory
Named user with read and execute permissions for a file ->  user:mary:rx file
File owner with read and execute permissions for a file ->  user::rx file
Read and write permissions for a directory granted to the directory group owner -> g::rw /directory
Read and write permissions for a file granted to the file group owner   ->  g::rw file
Read, write and execute permissions for a directory granted to a named group    ->  group:hug:rwx /directory
Read and execute permissions set as the default mask    ->  default:m::rx /directory
Named user granted initial read permission for new files, and read and execute permissions for new subdirectories   ->  default:user:mary:rx /directory


# ********************* secure files with ACLs
man setfacl

setfacl -m u:<user>:r  file     # -m  modify
                      u:<user>   specific user
                      :r        read

setfacl -m g:<group>:rw  file       # g:<group>

# "X" (capital X) with recursion: set execute permission only on directories, not on regular files, unless the file already has execute permission for some user.

setfacl -R -m g:<groupname>:rwX /shares/content
setfacl -R -m u:<user1>:- /shares/content   # remove permission for user1
setfacl -m d:g:<groupname>:rwx /shares/content      # default group ACL
setfacl -m d:u:<user1>:- /shares/content            # default user ACL

getfacl /shares/content # verify permission

chgrp -R managers /shares/cases
chmod g+s /shares/cases     # setgid special permission
    # new file and directory will be owned by "managers" group

chmod 660 /share/cases/*    

setfacl -R -m g:contractors:rwX /shares/cases
setfacl -R -m u:contractor3:rX /shares/cases    # rX  capital x
setfacl -m d:g:contractors:rwx /shares/cases
setfacl -m d:u:contractor3:rx /shares/cases

getfacl /shares/cases   # verify permissions

ls -ld /shares/cases
drwxrws---+ 2 manager1 managers     # "+" means an ACL is set


#***************** managing SELinux security
semanage fcontext, restorecon, chcon    # key SELinux context commands

getenforce
setenforce 0
ls -lZ

# if you move a file, it retains the SELinux context of where it came from, including its permissions !!!!!
restorecon -R <dir>     # use restorecon to fix SELinux context issues

# do not use chcon, as its changes are not persistent (lost on a relabel)


ls -Zd /virtual

semanage fcontext -l

man semanage-fcontext   # read this man page!!!!!!!!!!!
restorecon -R -v /web   # apply the context using restorecon

semanage fcontext -a -t httpd_sys_content_t "/virtual(/.*)?"    # set the SELinux context for /virtual and all subdirectories
restorecon -vvRF /virtual/  # apply the context
ls -lZd /virtual    # verify the context

semanage fcontext -lC   # show change


#********
# httpd config file
/etc/httpd/conf/httpd.conf
systemctl status httpd

systemctl enable --now httpd
systemctl status httpd

curl http://servera.x.x.    

setenforce 0    # SELinux permissive mode
setenforce 1    # SELinux enforcing mode

ls -Zd /custom
ls -Zd /var/www/html    # check the existing context for reference
semanage fcontext -a -t httpd_sys_content_t '/custom(/.*)?' # create a rule
restorecon -vvRF /custom    # apply

then test the access
    curl http://servera.x.x.

#*************** adjust SElinux policy with booleans
setsebool
semanage boolean -l 
getsebool -a    # list SELinux 

man -k _selinux
yum list available '*selinux*'  # verify available SELinux policy packages


man -k _selinux | grep zabbix
man -k _selinux | grep zabbix_can_network

semanage boolean -l | less  # provide description

getsebool -a | grep cgi     # allow to toggle

setsebool -P httpd_enable_cgi off   # -P makes it persistent; it takes a moment to apply


mkdir ~/public_html

getsebool -a | grep home
setsebool -P httpd_enable_homedirs on   # turn it on

vi /etc/httpd/conf.d/userdir.conf   # another httpd configuration file
    # UserDir disabled      <- comment this line out
    UserDir public_html     # uncomment this line to enable it

systemctl status httpd  # verify if httpd is running

#*********** investigating and resolving SELinux issues
sealert # troubleshooting tool

grep sealert /var/log/messages

sealert -l xxxxxx | less

#**** Exercise
# look at the status of the service
systemctl status httpd      # verify status first when troubleshooting, and check whether it is enabled or disabled
systemctl enable --now httpd    # try to enable and start it now, and see if there is any issue

systemctl status httpd      # check service status, and verify any error
systemctl status httpd -l --no-pager    # show full, untruncated output without a pager

# check message log
grep sealert /var/log/messages  # pay attention to "sealert" error and recommendation

sealert -l <id> | less   # run the command to see complete SELinux message


#************** managing basic storage
block device (physical disk, such as NVMe or SSD)

GPT partition scheme    # vs the MBR partitioning scheme: MBR allows 4 primary partitions (or 3 primary + 1 extended with up to 15 logical partitions) and has a 2 TiB disk limit

# managing partitions with parted
parted /dev/vda print   # you could use fdisk - using for MBR

lsblk   # check disk and partition 1st

yum install gdisk

parted /dev/vda
    print

    quit

parted /dev/vdb mklabel gpt # write a GPT disk label
    Note: the mklabel subcommand wipes the existing partition table

parted /dev/vda unit s print    # print using sectors as the unit

mkpart  # create a partition  <------ on a GPT-labeled disk
    partition name: primary
    file system type: xfs
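
# The same can be done non-interactively in one line; the start/end values below are only illustrative:
parted /dev/vdb mkpart primary xfs 2048s 1000MB     # name "primary", xfs hint, illustrative start/end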

parted /dev/vdb help mkpart # using help

udevadm settle  # detect the new partition and create the associated device file under the /dev directory
        # verify the partition exists after creating it

# deleting partitions
parted /dev/vdb     # delete the partition
    print
    rm 1    # delete number
    quit


gdisk   # gpt (fdisk MBR)   
gdisk /dev/vdd
    ?   help
    p
    n
    L   # show codes

    c   # change partition name
    W   # save / write
    Y   # confirm

mkfs.xfs /dev/vdd1  # format the partition after creating it on the GPT disk

Then mount the partition so you can use it
blkid   # verify the UUID after formatting      # lsblk --fs also shows UUIDs, but without the UUID= prefix

mkdir /data # make the mount point first, before update /etc/fstab

cat /etc/fstab
    # <device>      <mount point>   <fs type>   <options>   <dump> <fsck>
    UUID="xxxx"     /data           xfs         defaults    0 0

systemctl daemon-reload # reload systemd after editing /etc/fstab

df -h /<new mountpoint>     # verify new partition size

# ******* swap space concepts
yum install gdisk

    Memory size     Recommended swap size
    2 - 8 GB        2 x RAM
    8 - 64 GB       1.5 x RAM


# create a swap partition
lsblk   # check disk and space

1. create partition
parted /dev/vdb
    print
    mkpart
    swap1   # swap
    linux-swap

gdisk

gdisk /dev/vdd
    n
    1
    +8GB
    L   (verify code)
    8200    (swap)
    p   print
    ?   help
    c       # change name
        swap1
    p   # print verify
    W
    Y   save

udevadm settle

# activating swap partition

mkswap /dev/vdd1
    # verify UUID

# swap on | swap off

swapon -s   # swapon --show
swapon /dev/vdd1    # temporary; lost after a reboot

# update fstab
/etc/fstab
    UUID="xxxx" swap swap defaults 0 0

swapon -s   # verify
swapon -a   # enable / activate swap 

blkid   # verify disk and partition

systemctl daemon-reload
then, reboot

swapon -s 
free    # verify swap is in use

# to give swap spaces different priorities, update /etc/fstab
UUID="xxxxxxxx" swap swap defaults 0 0
UUID="xxxxxx" swap swap pri=10 0 0


parted /dev/vdb print   # verify /dev/vdb partitions

df -h   # verify the partitions and size


#********************* managing logical volumes (LVM)
more flexible than normal partitions

Physical devices (/dev/xxx)
physical volumes (PVs)
Volume Groups (VGs)   # one or more physical volumes    # pool of physical volumes
Logical Volumes (LVs)   # create LVs from VGs

cat /proc/partitions    # lsblk, lsbid

pvcreate /dev/vdb2 /dev/vdb1        # create physical volumes
    
vgcreate vg01 /dev/vdb2 /dev/vdb1   # Volume Group

lvcreate -n lvdata -L <size> <vg-group-name>    # logical volume
    -L  700M    
    -l  a number of physical extents

# format file system
mkfs -t xfs /dev/vg01/lvdata

/dev/vgname/lvname
/dev/mapper/vgname-lvname

# create mount point
mkdir /mnt/data

# update /etc/fstab
/dev/vg01/lvdata /mnt/data xfs defaults 0 0

# mount 
mount /mnt/data

# Remove the physical volumes
pvremove /dev/vdb2 /dev/vdb1    # the data will be deleted

# Reviewing LVM status
pvdisplay   # display all physical volumes
pvdisplay /dev/vdb1

vgdisplay   # VG display
lvdisplay   # LV display


#*********** process ************
1. create the partitions first

2. Create physical volumes
    pvcreate /dev/vdb1 /dev/vdb2

3. blkid
    # verify the UUID

4. pvdisplay    # verify the physical volumes

5. create volume group
 vgcreate vgdata /dev/vdb1 /dev/vdb2

6. vgdisplay
   pvdisplay    # shows its volume group association

7. lvcreate -n lvdata -L 5G vgdata

8. verify logical volume
 lvdisplay
 ls -l /dev/mapper/vgdata-lvdata    # this is a symbolic link
 ls -l /dev/vgdata/lvdata           # this is a symbolic link

9. format logical volume
 mkfs.xfs /dev/vgdata/lvdata

10. mount the logical volume
mkdir /data
vim /etc/fstab
    /dev/vgdata/lvdata /data xfs defaults 0 0

11. mount
mount /data

12. Verify
df -h /data

13. Check and ensure we can write to /data
cp -a /etc/*.conf /data
ls /data | wc -l
ls /data

14. vgdisplay
   lvdisplay


# ************ extend logical volumes
pvcreate and vgextend, vgdisplay    # extend a volume group
pvmove and vgreduce     # reduce a volume group; pvmove moves data off a physical volume onto other physical volumes in the same volume group
lvextend    # extend a logical volume
xfs_growfs  # resize XFS file system
resize2fs   # resize ext4 file system

# add new physical volume to the volume group
vgextend vgdata /dev/vdb3   # add /dev/vdb3 to vgdata
vgdisplay   # verify "Free PE / Size"


# Reducing a volume group (<-----Backup data!!!!)
pvmove PV_Device_Name
        # Relocate any physical extents from the physical volume you want to remove to other physical volumes in the volume group
        Note: The other physical volumes must have a sufficient number of free extents for the data

pvmove /dev/vdb3    # man pvmove
    # Linux will automatically choose and move PEs to other PVs
    # this command move the PEs (physical extends) from /dev/vdb3 to other PVs (Physical volumes) with free PEs in the same VG (Volume Group)

vgreduce VG_Name PV_Device_Name     # To remove the physical volume from a volume group
    vgreduce vg01 /dev/vdb3     # /dev/vdb3 is no longer part of vg01 (volume group)
                    # it can then be removed from the system
                    # the partition still exists on /dev/vdb3, and it is still initialized as a PV

#******* Extending a logical volume 
vgdisplay vg01      # consider logical volumes
            # consider logical volume as container
            # verify "Free PE / size"

lvextend -L +<size> /dev/vgdata/lvdata      # size  +10G
        # lvextend -L <exact new size> /dev/vgdata/lvdata   # without "+" will be exact size
        Note: Ensure new size is bigger than existing size <Important ----------------
    Note:  
    a) lvextend -l <PEs>  /dev/vgdata/lvdata
    b) lvextend -l +50%FREE /dev/vgdata/lvdata  <---- 100%FREE  all available space

#** Extend the file system <---Important: until you extend the file system, the added free space is not available to the operating system
xfs_growfs mountpoint   # expand the file system to occupy the extended LV
xfs_growfs /mnt/data

    # Common mistake
    Running lvextend but forgetting to run xfs_growfs afterwards
    Solution: run xfs_growfs now, or use "lvextend -r" so the file system is resized automatically

    lvextend -r -l +50%FREE /dev/vgdata/lvdata  <------ this is recommended!!!!

vgdisplay vgname


# Extend the swap space
1. Deactivate the swap space
swapoff -v /dev/vgname/lvname   # deactivate the swap space on the logical volume
    Note: Ensure there is enough free memory or swap space to accept anything that needs to page in
        when the swap space on the logical volume is deactivated

2. Extend the logical volume
lvextend -l +<extents> /dev/vgname/lvname

3. format the logical volume as swap space
mkswap /dev/vgname/lvname

4. Activate the swap space
swapon -va /dev/vgname/lvname


#******* process to extend the logical volume
su -    # enter root shell
lvs
vgs

lsblk   # verify disks

parted /dev/vdb print   # verify partition using MBR or GPT, then using gdisk (gpt), or fdisk (MBR)
gdisk /dev/vdb
    n
    +size
    p   # verify
    w   # save / write change
    Y

partprobe   # make the kernel re-read the partition table after the change <----
lsblk   # verify and see the new partition is visible

pvcreate /dev/vdb3  # create physical volume after adding the new partition

vgextend vgdata /dev/vdb3   # add /dev/vdb3 to the volume group (extend)
vgs # verify more free space
lvs
df -h

lvextend -r -L +<size> /dev/vgdata/lvdata   # -r also extends the file system <------

lvs
df -h

#********************* chapter 8 - Implementing advanced storage features with Stratis
Stratis architecture

device mapper (dm)

RHEL 8 introduces the Stratis storage management solution. Stratis runs as a service and builds on XFS and the device mapper, providing LVM-like volume management.
 - automatically manages the underlying volume layer (as long as the Stratis pool has enough storage)
 - thin provisioning    (similar to VMware)
    - Thinpool subsystem manages the pools  
 - uses metadata to recognize pools, volumes and file systems (similar to VMware datastore)

Installer, RHEL virtualization, Atomic host using Stratis

1. install
yum install stratis-cli stratisd    # stratisd daemon
                    # stratis-cli

2. Activate stratis service
systemctl enable --now stratisd
systemctl status stratisd   # verify stratisd service

3. Create Stratis pool
stratis pool create pool1 /dev/vdb  # create new pool
    # each pool is a subdirectory under the /stratis directory

4. view the list of available pools, and verify storage size and usage
stratis pool list

5. How to add new block devices (physical disks, such as ssd, NVMe)
stratis pool add-data <poolname> /dev/vdx   # example: stratis pool add-data pool1 /dev/vdc

6. List/view block device of a pool
stratis blockdev list   # similar pvdisplay
stratis blockdev list <pool-name>   # view block device of pool1

7. How to use it as a file system (no need to format or set a size; it grows automatically)
7.1 Create a dynamic and flexible file system (xfs)
stratis filesystem create pool1 filesystem1 
stratis filesystem list     # verify after creation

7.2 support file system snapshot
stratis filesystem snapshot pool1 filesystem1 snapshot1

7.3 View list of available file systems
stratis filesystem list

8. Update /etc/fstab to persistently mount the Stratis file systems
8.1 create mountpoint
mkdir /stratisvol   # mkdir /<mountpoint-name>

8.2 lsblk
lsblk --output=UUID /stratis/pool1/filesystem1

8.3 update /etc/fstab
UUID="xxxx" /dir-name xfs defaults, x-systemd.requires=stratisd.service 0 0 # ensure stratisd.service is started
        Important: x-systemd.requires=stratisd.service 
            otherwise on reboot, get crital error

9. testing
mount /dir-name     # verify mountpoint works


#**** reading
https://stratis-storage.github.io


#***************
# How to create a large file
dd if=/dev/urandom of=/stratisvol/file2 bs=1M count=2048    
    Note:
    a. if=/dev/urandom      input file  /dev/urandom
    b. of=/stratisvol/file2     output file /stratisvol/file2
    c. bs=1M            block size 1M
    d. count=2048           total 2048 x 1M = 2GB

    Note: this demonstrates that Stratis increases storage when required

9. How to cleanup, remove snapshot and filesystem
stratis filesystem destroy stratispool1 stratis-filesystem1-snap
stratis filesystem destroy stratispool1 stratis-filesystem1


#***************** Compressing and Decompressing, Deduplication storage with VDO (virtual data optimizer)
RHEL 8 includes VDO. VDO is a Linux device mapper driver that reduces disk space usage on block devices through deduplication, compression, and zero-block elimination.

Note: VDO is good for VMs, and data storage (file systems)
 - Zero-block elimination
 - Deduplication
 - Compression (LZ4 compression, grouping data into 4 KB blocks)

rpm -q vdo  # check vdo is installed

1. Install packages
yum install vdo kmod-kvdo

2. Verify VDO service
systemctl status vdo.service

3. Create a VDO volume  (similar to logical volume)
vdo create --name=vdo1 --device=/dev/vdd --vdoLogicalSize=50G
    Note
    if you omit the logical size, the resulting VDO volume is the same size as its physical block device (like using +100%FREE)

4. format and mount it under the file-system hierarchy on the system
mkfs.xfs -K /dev/mapper/vdo1    # -K (uppercase) skips discarding blocks at format time
udevadm settle
mkdir /mnt/vdo1
lsblk --output=UUID /dev/mapper/vdo1
vim /etc/fstab
    UUID=xxxxx /mnt/vdo1 xfs defaults,x-systemd.requires=vdo.service 0 0
mount /mnt/vdo1

df -h | grep /mnt/vdo1


5. Verify a VDO volume
vdo status --name=vdo1  
    vdo status --name=vdo1 | grep -E 'Deduplication|Compression'    # -E    uppercase to check both
                                    # -e 'Deduplication' -e 'Compression'   -e to check each
vdostats --verbose
vdostats --human-readable
watch vdostats --human-readable     # watch the vdostats changes

ls -l /dev/mapper/vdo1      # symbolic link
ls -lh /mnt/vdo1/file1   # verify /mnt/vdo1/file1 size


6. List VDO volumes
vdo list

7. start and stop vdo service
vdo start       # ensure vdo service starts before /etc/fstab mount
vdo stop

# **** Exercise
yum list stratisd stratis-cli
^list^install -y^   # history substitution: re-run the previous command with "list" replaced by "install -y"

# How to mount the snapshot, and recover file and folder if required
mkdir /stratisvol-snap
mount /stratis/stratispool/stratis-snapshot /stratisvol-snap    # mount snapshot to mountpoint
ls /stratisvol-snap # verify content of the snapshot, and verify the files and folders exist in the snapshot mount

when finished, umount the snapshot


#**** VDO
Note: the block device is 5GB, and we can create a VDO logical size bigger than the physical block device if required
vdo create --name=testvdo --device=/dev/vdd --vdoLogicalSize=50G    # create bigger vdo logical size

vdo list

mkfs.xfs -K /dev/mapper/testvdo # format file system

udevadm settle  # update
mkdir /testvdovol
lsblk --output=UUID /dev/mapper/testvdo

vim /etc/fstab
    UUID=xxxxx /testvdovol xfs defaults,x-systemd.requires=vdo.service 0 0

mount /testvdovol

# testing
cp xxx.img /testvdovol/test.img

watch vdostats --human-readable     # watch vdostats    with human readable format

reboot  # ensure the mount still works
    Note: if it fails, the system boots to emergency mode and the root file system is mounted read-only;
    you then need to remount it read/write to fix the problem

#****** important - how to fix /etc/fstab error ********
mount -o remount,rw /   # it will allow you to mount "/" as read / write, and update /etc/fstab


#************************* Accessing network-attached storage - from the NFS client ---
NFS client
nfsconf (NFS v4)

using /etc/fstab

#** how to identify NFS share
showmount -e serverb.lab.com        # since RHEL 7

mount -t nfs serverb.lab:/ /mnt/nfstest     # temp mount
mount -t nfs serverb.lab:/shares /shares    # temp mount

ls /mnt/nfstest     # identify whether there are any subfolders that have been shared
            # verify that firewall ports 2048-2049 are open <--------

# persistent
vim /etc/fstab
<nfs-server>:/share /mountpoint nfs rw,soft 0 0 #persistent

mount /mountpoint

# RHEL 8 introduce nfsconf
/etc/nfs.conf   # config file - update the nfs.conf file        

# How to configure NFS configuration, two methods
a. Using nfsconf
sudo nfsconf --set nfsd ver4.2 y

b. Update /etc/nfs.conf by uncommenting lines   # this is the easier method
[nfsd]
vers4.2 = y

udp=n       # better not to use spaces around "="
vers2=n
vers3=y
tcp=y
vers4=y
vers4.0=y
vers4.1=y
vers4.2=y
#************

#********* exercise
yum install autofs  # install autofs

mount -t nfs serverb.lab:/shares/direct/external /mnt/shares
ls /mnt/shares  # verify
umount /mnt/shares

#** configure autofs  mount
vim /etc/auto.master.d/direct.autofs    # Important: need to have .autofs  file extension
    /-  /etc/auto.direct        # example

    /external   -rw,sync,fstype=nfs4 serverb.lab:/shares/direct/external    # this entry goes in /etc/auto.direct; test first


mount -t nfs serverb.lab:/shares/indirect   /mnt/shares # indirect mount
ls /mnt/shares

vim /etc/auto.master.d/indirect.autofs  # create this file
    /internal   /etc/auto.indirect  # ensure the /etc/auto.indirect map file exists at that location

vim /etc/auto.indirect      # create this file
    *   -rw,sync,fstype=nfs4    serverb.lab:/shares/indirect/&
                # wildcard to be able to catch any new share folder

# then verify autofs is enable and running
systemctl status autofs
systemctl enable --now autofs

# testing mount and reboot, to ensure the nfs share is mounted after reboot
ls -l /internal
ls -l /external

echo "test only" > /external/testfile.txt   # create test file
cat /internal/{central,east,west}/README.txt    # read files from 3 directories

# ************ exercise nfs client
1. install autofs
yum install autofs

2. update /etc/nfs.conf
# use vim visual mode ("v"), delete the "#" from the highlighted lines, and add udp=n
udp=n
tcp=y
vers2=n
vers3=n     # using y if required
vers4=y
vers4.0=y
vers4.1=y
vers4.2=y

3. testing the nfs share and temp mount
mount -t nfs serverb.lab:/shares /mnt
ls -l /mnt
umount /mnt # after successfully verify, then create the auto mount

4. Create file /etc/auto.master.d/share.autofs  # need to end with .autofs  file extension for the file
vim /etc/auto.master.d/share.autofs
    /remote /etc/auto.shares        # anyone want to access /remote, look at this file /etc/auto.shares

5. Create an indirect map file
vim /etc/auto.shares
    *   -rw,sync,fstype=nfs4  serverb.com:/shares/&     
        # "&" will automatically change when it detects new share folder under /shares

6. start and enable the autofs service
systemctl status autofs
systemctl enable --now autofs

7. testing  # important, to prevent reboot into emergency boot !! 
ls
cat
cp

8. reboot to verify the share automatically mounts
systemctl reboot

9. test again
su - <test-user>    # login as the test user shell

#********* automounter
the automounter is a service (autofs) that automatically mounts NFS shares "on demand", and automatically unmounts NFS shares when they are no longer being used.

# automounter benefits
a. Users do not need root privileges to run the mount and umount commands
b. NFS shares configured in the automounter are available to all users on the machine
c. NFS shares are not permanently connected like entries in /etc/fstab, freeing network and system resources
d. the automounter is configured on the client side; no server-side configuration is required
e. the automounter uses the same options as the mount command, including security options
f. the automounter supports both direct and indirect mount-point mapping, for flexibility in mount-point locations
g. autofs creates and removes indirect mount points, eliminating manual management
h. NFS is the default automounter network file system, but other network file systems can be automatically mounted
        # autofs also works with other file systems
i. autofs is a service that is managed like other system services

# create an automount
sudo yum install autofs     # install autofs package
sudo vim /etc/auto.master.d/demo.autofs     # add a master map file to /etc/auto.master.d 
        # this file identifies the base directory used for mount points and identifies the mapping file used for creating the automounts


yum list autofs     # verify the autofs package
/etc/auto.master    # configuration file
    # include /etc/auto.master.d/*.autofs
    # Important:  the <name>.autofs must exist in /etc/auto.master.d  directory
        the name is not important

/shares  /etc/auto.demo      # watch this directory, and indirect map the share
vim /etc/auto.demo      # Important - the indirect map name "/etc/auto.demo" must match the file that is created

Then update the /etc/auto.demo  file with the mount that you need to use, example
    projects    -rw,sync    servera:/shares/projects

        Where  -rw,sync     # mount options start with a dash character "-" and are comma-separated
            -fstype=xfs # with NO white space in between <---------- important
            -fstype=nfs4    # specify only nfs4 if required
            -strict     # use strict to treat errors when mounting file systems as fatal
                    # NFS share follows the  host:/pathname     pattern

#** indirect wildcard maps
    *  -rw,sync  servera:/shares/&      # entry in /etc/auto.demo  file
            # (*) is a variable/wildcard
            # servera has multiple subdirectories under /shares directory

            # When an NFS server exports multiple subdirectories within a directory, then the automounter (autofs)
            # can be configured to access any one of those subdirectories using a single wildcard mapping entry

            # the mount point is an asterisk character (*), and the subdirectory on the source location (NFS server export locations) is 
            # an ampersand character (&)
            # Example: when a user tries to access /shares/projects, the wildcard * matches "projects", that name replaces the ampersand & in the source location (NFS export),
            # and servera:/shares/projects is mounted.

systemctl enable --now autofs   # enable autofs
systemctl start autofs


#******* direct maps
Direct maps are used to map an NFS share to an existing absolute path mount point, example
    /-  /etc/auto.direct    # all direct map entries use "/-" as the base directory
                    # the mapping file "/etc/auto.direct" contains the mount details

vim /etc/auto.direct
    /mnt/docs   -rw,sync    servera:/shares/docs    # /mnt/docs absolute path, 
                                # if user try to access /mnt/docs, then auto-mount  servera:/shares/docs
        # in this example,  /mnt  directory exist, and /mnt/docs  will be created and removed automatically by the autofs service

#*** Exercise
yum install autofs

#** direct automount
mount -t nfs servera.lab.com:/shares/direct/external /mnt/external
ls /mnt/external
umount /mnt/external    # testing and verify

vim /etc/auto.master.d/direct.autofs    # important -  file extension ".autofs"
    /-  /etc/auto.direct

vim /etc/auto.direct
    /external   -rw,sync,fstype=nfs4  servera.lab.com:/shares/direct/external


mount -t nfs serverb.lab.com:/shares/indirect  /mnt/indirect
ls /mnt/indirect
umount /mnt/indirect

#** indirect automount
vim /etc/auto.master.d/indirect.autofs
    /internal   /etc/auto.indirect  # when a user enters /internal, autofs consults the entries in /etc/auto.indirect
cat /etc/auto.master.d/indirect.autofs      # verify the entry
vim /etc/auto.indirect      # create the indirect file as required, specified in the file /etc/auto.master.d/indirect.autofs
    *   -rw,sync,fstype=nfs4    serverb.lab.com:/shares/indirect/&  # (*), and (&)

systemctl status autofs     # verify autofs service
systemctl enable --now autofs   # enable and start autofs at the same time

# testing the access then reboot
reboot


#********************** Control the boot process *****************
systemd -> target 

/boot/grub2/grub.cfg
/etc/grub.d/
/etc/default/grub
    grub2-mkconfig  <------------
/etc/dracut.conf.d/
    dracut
    lsinitrd
    initramfs
        /sbin/init

vim /etc/default/grub   <-------- grub2-mkconfig reads this file when it regenerates grub.cfg
    GRUB_TIMEOUT=1      <------------ 1 second by default
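
# After changing /etc/default/grub, regenerate grub.cfg (the output path below is the usual BIOS location; UEFI systems use a path under /boot/efi):
grub2-mkconfig -o /boot/grub2/grub.cfg      # rebuild grub.cfg from /etc/default/grub and the /etc/grub.d/ scripts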

#*** targets
there are 4 targets:
    graphical.target
    multi-user.target   # text based only
    rescue.target       # sulogin prompt, basic system initialization completed    # it will prompt for the root password <-- sulogin
    emergency.target    # sulogin prompt, initramfs pivot complete, and system root mounted on / read only


systemctl list-dependencies graphical.target | grep target  # list all the dependencies target for graphical.target

systemctl list-units --type=target --all    # list all the available targets

grep AllowIsolate=yes *.target  # only targets with AllowIsolate=yes can be set as default or isolated (switched to)

ls -l runlevel*.target  # it will show the symbolic link to the target
        # runlevel0.target, etc 
        # runlevel6.target

# verify default boot target
systemctl get-default
systemctl set-default (tab completion, and select the required target)

#**** change boot menu option at boot time, when system reboot, press "Escape key" to select boot option
    Red Hat Enterprise Linux ............ (version)
    Red Hat Enterprise Linux (0-rescue-xxxx)

Press e  to edit

Edit the linux ... line, then press Ctrl-x (to boot with the edited options)
    .... ro console=tty0 systemd.unit=emergency.target no_timer_check ....  # to boot to emergency.target
            # note: do not press ctrl-c (grub prompt)


#****************************************************************************************
1. Verify default boot target
systemctl get-default

2. Switch/change to a new isolated target
sudo systemctl isolate multi-user.target

3. Set default boot target
systemctl set-default <new target>

# to switch to rescue.target at boot time
a. press Escape when booting
b. press "e" to edit, and add  systemd.unit=rescue.target
   to the end of the line "linux ($root).....  crashkernel=auto"
Note:
    .... crashkernel=auto systemd.unit=rescue.target
c. press ctrl-x to restart
d. it will then prompt for the root password
e. after entering the root password, the rescue shell starts
f. type
    mount | grep 'vd[abc]'  # verify the root/boot partition (e.g. /dev/vda1) is mounted "rw"


#****************************************
# how to reset root password
# Method 1:
1. reboot system
2. interrupt boot, press the "Escape" key
3. press "e" to edit
4. find the line "linux...."
5. append rd.break
6. press ctrl-x to reboot
7. remount /sysroot as read/write
    mount -o remount,rw /sysroot
8. switch into a chroot, where /sysroot is treated as the root of the file system tree
    chroot /sysroot
9. set a new root password
    passwd root
10. relabel     # force a full SELinux relabel on the next boot
    touch /.autorelabel
11. type "exit" twice
    the first exit (exits the chroot)
    the 2nd exit (exits the initramfs default shell)
12. system then reboot with the new root password


#*** verify log file
1. vim /etc/systemd/journald.conf
    Storage=persistent      # make the journal log persistent
2. systemctl restart systemd-journald.service   <------ good practice

# to inspect the logs of a previous boot 
    -b  # display messages since the last boot
journalctl -b -1 -p err     # show errors from the previous boot (-b -1 means one boot back)


# enable root shell - Early debug shell  (******* Method 2 to change root password *************)
ctrl+Alt+F9 (tty9)  # during the boot sequence
            # the shell is automatically logged as root
            # admin can debug the system while the system is still booting
        Note: Don't forget to disable it when not needed
            debug-shell.service
    
            Method:
            a. reboot system, press "e", edit linux.... line
            b. change to (add)
              console=tty0 systemd.debug-shell no_timer_check   # systemd.debug-shell
        It will boot as normal, then press Ctrl+Alt+F9           # Ctrl+Alt+F9
            then 
                passwd  # to change root password       # passwd root
            then 
                sudo systemctl stop debug-shell.service     # stop debug-shell.service


# **************** Repairing file system issues at boot *****************
1. corrupted file system
2. /etc/fstab   # non-existent device, UUID, non-existent mount point, incorrect mount option, incorrect UUID value
        Result: boot in emergency shell, and "/" is mounted as "read only"
        # recommended using UUID <---------------

Solution:
$ mount -o remount,rw /     # remount "/" as read/write (-o option, no space after the comma)
vim /etc/fstab          # then modify /etc/fstab and fix the issue
                # comment out the problem line / mount point and then troubleshooting
systemctl daemon-reload     # after edit /etc/fstab, reload daemon

After fixing the boot, and system boot normally, run
$ mount -a      # mount all entries in /etc/fstab, and see any issue
$ df -h         # check file system

#* reboot the system, and see if there is any issue with boot <-----------------
Situation: when the system boots, it drops to the emergency shell, and the root password is incorrect

1. reboot, press "escape", and type "e" to edit boot
2. in "linux ...." line, after "ro" enter the following 
    systemd.unit=emergency.target systemd.debug-shell  # you can add both options
3. ctrl-x   # restart
4. In emergency shell, type Ctrl+Alt+F9
5. Then mount "/" as read/write
    mount -o remount,rw /   # enter the command
6. passwd       # change root password
# if not able to fix the root password, then reboot into emergency target again, by edit "grub" process as outline above
    systemd.unit=emergency.target
7. Reset root password again
    mount -o remount,rw /   # if you see an error message, ignore it and continue
8. Checking/verify
    mount | grep vda        # verify mount
    mount -a            # verify /etc/fstab; you will see an error as it tries to mount all entries, then fix the bad line
    lsblk               # verify the UUID is correct, comment it out, or fix the issue
9. reboot and verify system boot successfully


#********************************* Managing network security ****************************
firewalld <------------ frontend of nftables
SELinux

netfilter
nftables <------ using xtables-nft-multi    # to translate iptables objects directly into nftables rules and objects
iptables <--------- same as previous versions
nft <-------- RHEL 8 utility
ip6tables
ebtables

# minimum install, firewalld is NOT installed. Only included in "Base" install

#** firewalld is based on zones: trusted, home, internal, work, public
connection filters on interface

cd /usr/lib/firewalld/      # where firewalld configuration 
ls /usr/lib/firewalld/zones # all the zone xml files, filter traffic, source, device, interfaces

# firewalld also defines services   # ansible also defines service

ls -l /usr/lib/firewalld/services   # verify the services; firewalld uses the xml files to define the services
                    # there are a lot of predefined services; you could update/modify the xml file
                    # Note: an upgrade may revert the change *****

grep <port-number> *.xml    # check which xml has the port define

Note:
  Firewalld changes have a runtime and a persistent state; use --permanent for persistent changes

$ firewall-cmd --   (tab tab) for completion # lots of options
            # runtime and persistent
    firewall-cmd --reload       # reload so permanent changes take effect at runtime <-------------
    firewall-cmd --permanent    # make changes persistent     <-------------

# common use (do more reading)
    --get-active-zones
    --get-default-zone  # if no zone defined, it will be applied to default zone
    --list-all  # list just runtime
    --list-all --permanent
    --list-all-zones
    
    --add-service
    --add-port
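
# A typical sequence combining the options above (the service name is just an example):
    firewall-cmd --permanent --add-service=https    # add a persistent rule
    firewall-cmd --reload                           # apply it to the runtime configuration
    firewall-cmd --list-all                         # verify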

#*** exercise
sudo yum list httpd mod_ssl # verify httpd and ssl package installed
^list^install   # replace "list" in the previous command with "install", then press ENTER

# modify index.html
vim /var/www/html/index.html

curl http://servera.lab.com # test the web server from different machine
curl -k https://servera.lab.com # -k    do not verify the TLS certificate

#*********** troubleshooting firewall
sudo systemctl status nftables  # verify "Active", "Loaded"
sudo systemctl mask nftables    # "mask" prevents the service from being started
systemctl status firewalld


#*********** SELinux port labeling
SELinux expects services on their standard ports; if you use a non-standard port, you need to label it in SELinux

semanage port -l    # list current port label assignments
semanage port -l | grep ftp

semanage port -l | grep -w 80   # w word

less /etc/services  # check this file for information
grep gopher /etc/services   # check specific service

# manage port labels
semanage port -a -t port_label -p tcp|udp portnumber    # -a "add", -t "type", -p "protocol"
    Example: semanage port -a -t gopher_port_t -p tcp 71
         semanage port -l | grep -w 71   # verify

# To view local changes to the default policy
semanage port -l -C     # -C shows local changes only     (same as: semanage port -lC, uppercase "C")

Note:
1. The targeted policy ships with a large number of port types
2. Service specific SELinux man pages are in the selinux-policy-devel package, include SELinux types, booleans, and port types
$ yum install selinux-policy-devel
  yum install selinux-policy-doc    # 
  mandb     # update man database
  man -k _selinux   # search for _selinux
  man -k _selinux | grep http       # search for specific 

# Remove the label
semanage port -d -t gopher_port_t -p tcp 71     # -d "delete"

# modify
semanage port -m    # modify

#******* troubleshooting
normally the service fails to start
    systemctl status sshd
    
check enforcing vs permissive mode, and look at the sealert output and the audit log

#**************** Exercise - troubleshooting
1. check service running status
systemctl enable --now httpd
systemctl status httpd
systemctl status httpd -l --no-pager    # show full, untruncated output without a pager

Note:
Could verify journalctl

2. setenforce 0     # set SELinux to permissive mode
3. systemctl start httpd    # try to start the service again, and see the result
4. setenforce 1     # set SELinux back to enforcing mode
5. grep sealert /var/log/messages
From the message, look for issue and resolution
    sealert -l <xxxxxx> | less -X   # use "X" capital X option

look for help, suggestion or solution
    semanage port -a -t PORT_TYPE -p tcp 1001

look for standard port for example and verification of PORT_TYPE
    semanage port -l | grep -w 80   # search word 
        Note:
        The output suggest to use "http_port_t" port_type

Fix the issue
    semanage port -a -t http_port_t -p tcp 1001 
    semanage port -l | grep -w 1001

systemctl start httpd
systemctl status httpd

# check firewall rule
firewall-cmd --get-active-zones
firewall-cmd --list-all
    Check which firewall ports and services are allowed
firewall-cmd --list-all --permanent # check persistent firewall rule (different from runtime firewall rule)
firewall-cmd --permanent --add-port=1001/tcp    # add tcp 1001 to firewall as persistent rule
firewall-cmd --reload   # reload the firewall for the change to take effect
firewall-cmd --list-all # verify that the change has been committed

# testing the access again, to ensure the access is now working
curl http://server1.test.lab:1001   # test the new port
curl http://server1.test.lab


#************** Install Red Hat Enterprise Linux
1. manual installation (interactive)
    Note: configure Network & Host Name first, before selecting "Installation Source"
2. kickstart file   (automated installation)
3. using the web console

# software
1. binary DVD (containing Anaconda), and BaseOS and AppStream package repositories
2. boot ISO, connects to network packages repositories
3. QCOW2 image to deploy virtual machine

# build images with Composer
Composer is a new tool available in RHEL 8. It builds custom images for deployment in cloud and virtual environments.


#** Troubleshooting installation
During the RHEL 8 installation, Anaconda provides two virtual consoles.
1. tmux     # software terminal multiplexer
        # one tmux window provides a shell prompt, used to enter commands to inspect and troubleshoot while the installation continues
        # the other windows provide diagnostic messages, logs, and other information
    Ctrl+Alt+F1
    
    ctrl+b  # then press the number to access the window that you want to access
    ctrl+b 1    # In tmux, access the main information window, main information page
    ctrl+b 2    # provide a root shell, Anaconda stores the installation log files in /tmp directory
    ctrl+b 3    # displays contents of /tmp/anaconda.log file
    ctrl+b 4    # displays contents of /tmp/storage.log
    ctrl+b 5    # displays contents of /tmp/program.log
    
    Note: for compatibility with earlier version of RHEL
        Ctrl+Alt+F2 through Ctrl+Alt+F5 also present root shell during the installation

2. virtual console
    Ctrl+Alt+F6 # Access Anaconda graphical interface


#********* Automatically install with Kickstart
ksvalidator (edit the Kickstart file with a text editor and check its syntax with ksvalidator) - similar to Oracle Solaris automated installs or an unattended Setup answer file in Microsoft Windows

1. Create a Kickstart file with the Kickstart Generator website
2. Modify an existing Kickstart file with a text editor and check its syntax with ksvalidator
3. Publish a Kickstart file to the installer
4. Perform a network Kickstart installation

#** Kickstart file - Can be fully automated or semi-automated
# comment

%   # directive
%end    # end directive

%packages   # package section
    %end    # end package directive

@^  # "@^" selects an environment group; module streams use the form below
    @module:stream/profile

# Groups - have mandatory, default and optional components
    # Kickstart installs mandatory and default components
    # "-" precede -  to exclude a package or package group

# %pre %post contain shell scripting commands that further configure the system
%pre    # the %pre script is executed before any disk partitioning is done
%post   # the %post script is executed after the installation completes
        # Any configuration can be done after the installation
    %end

# You must specify the primary Kickstart commands before the %pre, %post and %packages sections
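
# A minimal sketch of how these pieces fit together in one file (values are placeholders, not a complete working Kickstart):
lang en_US.UTF-8
rootpw --plaintext <password>
autopart
reboot

%packages
@^minimal-environment
-plymouth
%end

%post
echo "kickstart install finished" >> /root/post-install.log
%end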

#******** Ansible is slowly replacing Kickstart files
Let kickstart start the installation, and Ansible continue the installation and configuration

#****** where to get more documentation
which ksvalidator   # verify whether ksvalidator has been installed
yum provides */ksvalidator  # verify which package contains ksvalidator
yum install pykickstart     # install the required package
rpm -qad '*kickstart*'      # -q query, -a "all packages", -d "documentation"

less /usr/share/doc/python3-kickstart/kickstart-docs.txt    # good documentation


#**** How to manually create the kickstart configuration file
1. Carry out the installation
2. /root/anaconda-ks.cfg    # the installation automatically creates this Kickstart file in root's home directory
            # Then use it for the standard SOE deployment

# ****** Kickstart Syntax
url --url="https://..."
repo --name=

#* Partition commands
clearpart --all     # Remove partitions from the system prior to creating new partitions. By default, no partitions are removed
part /home  # part  specifies the size, format, and name of a partition
autopart    # automatically creates a root partition, a swap partition, and an appropriate boot partition
ignoredisk  # 
    ignoredisk --drives=sdc # example to exclude sdc during the installation
bootloader  # define where to install the bootloader
    bootloader --location=mbr --boot-drive=sda  # example
volgroup, logvol    # Create LVM volume groups and logical volumes


#** Network commands
network     # network configuration (if omitted, the user defines it during the installation)
    network --device=eth0 --bootproto=dhcp  # example

firewall    # defind firewall configuration
    firewall --enabled --service=ssh,http

# Location, security command
lang    # sets the language to use during installation and the default language of the installed system
    lang en_US.UTF-8

keyboard    # sets the system keyboard type
    keyboard --vckeymap=us --xlayouts=''

timezone    # define timezone, NTP servers, and whether the hardware clock uses UTC
    timezone --utc --ntpservers=ntp1.test.lab Australia/Sydney

authselect  # set up authentication options  man authselect(8)  for more details

rootpw      # defines the initial root password
    rootpw --plaintext <password>
    rootpw --iscrypted xxxxxxxxxxxxx    # encrypted

selinux     # Set the SELinux mode for the installation
    selinux --enforcing # Enforcement mode

services    # Defines the default set of services to run under the default systemd target
    services --disabled=firewalld --enabled=network,iptables

group / user    # create a local group or user on the system
    group --name=admins --gid=10001     # create local admin group
    user --name=<login-name> --gecos="Local Admin" --group=admins --password=<required-password> --plaintext

    Note: Or could use groupadd, useradd as part of the post installation script

# other Miscellaneous commands
logging # defines how Anaconda will perform logging during the installation
    logging --host=loghost.lab.local  --level=info

firstboot   # if enabled, the Setup Agent starts the first time the system boots
        # The initial-setup package must be installed
    firstboot --disabled    

reboot, poweroff, halt


rpm -ql pykickstart # query package and list
    # it will list the man page files   /usr/share/man1/ksverdiff.1.gz  # example
    # it contains executable        /usr/bin/ksverdiff
                        /usr/bin/ksvalidator

#** kickstart file can also be generated from Red Hat online
https://access.redhat.com/labs/kickstartconfig
    KickStart Generator     # using the online kickstart generator

##**** Important: Trial and error and testing
ksvalidator # only verify syntax


#************** Where to place the kickstart file
1. Network server available at install time using FTP, HTTP or NFS
2. USB disk or CD-ROM
3. Local hard disk on the system to be installed

#********* Boot Anaconda and point it to the kickstart file
Once the kickstart method is chosen, the installer is told where to find the kickstart file by passing the
    inst.ks=<location> boot parameter
        # Example   
        # inst.ks=http://server/dir/file
        # inst.ks=ftp://server/dir/file
        # inst.ks=nfs://server:/dir/file
        # inst.ks=cdrom:device

Note:
sudo cat /root/anaconda-ks.cfg  > ~/kickstart.cfg   
    # ~/kickstart.cfg   will be owned by the user who run the cat command

sudo cp /root/anaconda-ks.cfg ~/kickstart.cfg
    # ~/kickstart.cfg   will be owned by root
    # Note: If this file is then copied to another directory, it retains root's permissions and ownership, so SELinux or file access may be denied
    Solution:
        sudo chown student:  test.cfg   # this will change the owner and owner group to "student"
        chmod 664 test.cfg      # change permission

#******* install and configure virtual machines
KVM (kernel-based Virtual Machine) - RHEL 8 supports KVM

RHEL Virtualisation (RHV)   - similar to vCenter
Red Hat Virtualization Hypervisor   - similar to vSphere
Red Hat OpenStack Platform (RHOSP) - similar to vCenter / SDDC

Red Hat supports virtual machines running
    a. RHEL 6 and later
    b. Windows 10 and later
    c. Windows Server 2016 and later

#** Configuring a RHEL physical server as a virtualization host
1. Install RHEL
2. Install the virt module
    yum module list virt
    yum module install virt

3. Verify system requirements (supported Intel and AMD versions/editions)
    virt-host-validate
4. Managing virtual machines using cockpit  (web console)
    https://<server>:9090
   yum install cockpit-machines
   systemctl enable --now cockpit.socket
5. Navigate -> Virtual Machines -> Create new virtual machines
    # could use Kickstart, or manual installation

#**** Exercise
sudo cp /root/anaconda-ks.cfg ~/kickstart.cfg
sudo chmod a+r kickstart.cfg    # add all "read" permission
sudo chown student: kickstart.cfg   # change owner and owner group to "student" user, and "student" group
ls -l kickstart.cfg # verify permission and ownerships

vim kickstart.cfg   
    /reboot   #search reboot
    V   # capital "V" (line-wise visual mode); highlight and press "c" to change the highlighted text
    d$ (or "D" shift d) # delete the text from the cursor to the end of the line
    d0      # delete to the begin of the line

# check required service is running
    systemctl status <service>
# check firewall configuration to ensure the required port or service is allowed
    sudo firewall-cmd --list-all --permanent
# check that you can access the kickstart config file

# Finally, reboot the VM and install RHEL


#************************ Running Containers ************************************

Requirements
1. image    # required package, files
    # Example httpd

2. In RHEL, root is not required to start a container (rootless containers)
3. podman   # tool to manage container
        # No daemon required
    a. It takes the "image" and starts the container, then deploy the image to the container
    b. could make change to the container
4. buildah  # tool - build new image
5. skopeo   # tool


# ** RHEL 8 VM
a. It has the required software and packages
b. create the container application
c. If the VM needs to be rebuilt or serviced, we can deploy a new container (same image) on another RHEL VM
d. K8s supports containers management
    - Using OpenShift cluster (better than native K8s for support)

#*********** Running a basic container
1. Need a container host (RHEL 8 VM)
a. Kernel
    - cgroups
    - selinux
    - namespaces
    - seccomp

b. Tools
    container-tools
        - podman    # start/stop container
                # download the image from image registry
        - skopeo    # manage container image
        - buildah   # build image
        
The container host itself runs in the initial namespace, namespace0   # covers the entire RHEL operating system
    Each container then starts in a new namespace created inside namespace0, such as namespace1
        ps -ef  # run on the host, shows the processes in namespace0


Image
    - contains all the binaries required to run the application

Deploy the image into namespace1 as a container # example (we call namespace1 the container)

#*** build a basic container
Image Registry
    1. Using podman to download the image from the image registry
    2. Start the container (a deployed image)
        registry.example.com
        registry.example.com/username/imagename     (the tag is the version number of the image)
    quay.io  # Red Hat's public container image registry
        quay.io/user1/webserver1:version
        [registry/username/image:tag]

A container is a running instance of an image


#*** Exercise
sudo yum module install container-tools | tail -4 
    # non-interactive install (-y answers yes to prompts):   sudo yum module install -y container-tools | tail -4

# connect to the image registry
$ podman login -u <username> -p <password> registry.access.redhat.com

$ podman images     # to view the images
$ podman pull registry.lab.com/rhel8/httpd-24:latest        # Example, download the container image
$ podman run --name testweb -it registry.lab.com/rhel8/httpd-24  /bin/bash     # start a container from the image
                                            # start the bash shell so we can verify it
        # the container can be identified by its id or name

$ ps aux    # verify the processes running inside the container
$ id        # verify the uid and gid of the login user inside the running container
    uid=1001(default) gid=0(root) groups=0(root)    # it runs as default

#** Run a one-off command and remove the container automatically when it exits
$ podman run --rm registry.lab.com/rhel8/httpd-24 httpd -v  # --rm    remove/delete the container once it exits


# run and verify a container in the background
$ podman ps # shows any running containers
$ podman run -d registry.lab.com/rhel8/httpd-24:latest      # -d    "detached" / running in the background
$ podman ps # it now shows the running container, including its id, status, etc
$ podman images     # show locally downloaded images


podman exec -it <container-id> bash     # connect to the running container and run a bash shell inside it
ps -ef      # show the processes running inside the container
ss -plnt    # a command that is not included in the image will not run

podman stop <container-id | name>
podman ps -a    # list all containers, including stopped ones

podman logs <container-id>  # view logs in container

podman rm <container-id>    # remove the container
podman ps -a        # verify to ensure the container has been deleted


#********** finding and manage container images
grep '^[^#]' /etc/containers/registries.conf  # show lines that do not start with #

podman info | grep -A15 registries      # show 15 lines after the "registries" match to find the registry information
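
Note: the relevant part of registries.conf might look like the lines below; the exact format depends on the
    containers-common version, and the registry list here is only an example:
        unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"]
        # older releases use a table instead:
        # [registries.search]
        # registries = ['registry.access.redhat.com', 'registry.redhat.io', 'docker.io']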

podman search registry.redhat.io/mysql | head       # search for images containing "mysql"
podman search --no-trunc registry.access.redhat.com/<image-name|string> | head -5   
    # search for images containing the required string; --no-trunc does not truncate the lines, so all information is shown

podman search --no-trunc --limit 3 --filter is-official=false registry.redhat.com/perl
    # search the registry for unofficial images that contain "perl", and return 3 results

# download the image and store it on the RHEL container host, so it is ready to use and does not need to be downloaded again later
podman pull registry.lab.com/rhel8/httpd-24
podman images       # show / view available downloaded images


#** to learn more about the image - skopeo
skopeo inspect docker://registry.lab.com/rhel8/httpd-24 | less
        # use skopeo to manage and view detailed information about the image
        # when no tag is specified, the "latest" tag is assumed
        # it shows more metadata

Important: container starts from the image <--------------

podman images   # view available images
podman rmi registry.lab.com/rhel8/httpd-24  # remove image / delete the downloaded image

cat /home/student/.config/containers/registries.conf    # 
podman search registry.lab.com/ubi  # search for specific image

podman inspect registry.lab.com/rhel8/httpd-24 | head -18   # inspect / verify the image


#********** perform advanced container images
podman ps -a    # also lists stopped (exited) containers that are not currently running
podman logs <container-id | name>    # view the container's logs

# how to pass the variable to the container
podman run -d -e MYSQL_USER=test1 -e MYSQL_PASSWORD=password -e MYSQL_DATABASE=testdb -e MYSQL_ROOT_PASSWORD=Password01 registry.lab.com/rhel8/testingdb-01:01
podman ps   # verify the running container's container ID
podman stop <container-id>  # stop the running container before delete/remove it
podman rm <container-id>

# How to interactive with or using the container, such as using over specific port
podman run -d -p 3306:3306 -e MYSQL_USER=test1 -e MYSQL_PASSWORD=password -e MYSQL_DATABASE=testdb -e MYSQL_ROOT_PASSWORD=Password01 registry.lab.com/rhel8/testingdb-01:01
    # -p    (port)
    # <host-port>:<container-port>   port forwarding
    # when a user connects to that port on the container host, the connection is forwarded to the container listening on that port

# testing from the container host
mysql -h 127.0.0.1 -u <sql-login-name> -p -P <port-num>   
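    # Using the values from the podman run example above, the test might look like this:
    mysql -h 127.0.0.1 -P 3306 -u test1 -ppassword testdb -e 'SHOW DATABASES;'
        # -P 3306       the port published on the container host
        # -ppassword    password attached directly to -p (no space); omit the value to be prompted instead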

podman kill -s      # send a signal to the container; the "-s" option specifies which signal
        podman kill -s <signal> <container-id | name>

podman restart <container-id | name>      # restart an existing container


#**** Exercise
podman login -u admin registry.lab.com   # without -p <password>, it will prompt you for the password
mysql -u user1 -p --port=<portnum> --host=127.0.0.1 

podman run --name <run name> -it registry.lab.com/rhel8/httpd-24:12 /bin/bash   # -it   interactive terminal
                                        # /bin/bash bash shell
podman exec -l uptime   # verify container uptime   -l (reference the last container)

podman run --name <run name> --rm registry.lab.com/rhel8/httpd-24:102  cat /etc/<file>
        # --rm    when the container finishes the task and exits, remove it, so it is not left behind as a stopped container

podman stop -a  # stop "all" containers
podman rm -a    # remove/delete all containers

#*************** Attaching persistent storage to a container
1. create a directory (persistent directory) in the container host
mkdir -pv  ~/webcontent/html    # example, create the directory ~/webcontent/html in the user home directory
                # -p   create parent directories as needed;  -v  verbose
echo "testing page" > ~/webcontent/html/index.html  # create an index.html in the directory
ls -ld  ~/webcontent/html/  # verify all users (other) have read and execute permission
    drwxrwxr-x  /home/student/webcontent/html
ls -l ~/webcontent/html/index.html  # verify all users have read permission on the newly created file
    -rw-rw-r--  /home/student/webcontent/html/index.html

2. Create a container using the image
podman login -u admin -p xxxxx registry.lab.com     # access to the registry
podman run -d --name testweb -p 8080:8080 -v ~/webcontent:/var/www:Z registry.lab.com/rhel8/httpd-24:01
    # run a new container with name "testweb"
    # -d    run in the background (detached)
    # port forwarding 8080:8080 forwards port 8080 on the container host to port 8080 in the container
    # -v    use persistent storage (volume mount)
        -v ~/webcontent:/var/www:Z   # mount the persistent volume ~/webcontent  (/home/student/webcontent)
                        # /var/www   mountpoint inside the container
                        # :Z (capital Z)    SELinux label container_file_t
                        # using the image  registry.lab.com/rhel8/httpd-24:01   (tag :01  /version 01)
                        Note: when no tag (version) is given, the "latest" version is used
        Note: it will download the image if it is not already present

3. Verify the running container
podman ps   # verify the container is running
curl http://localhost:8080  # test the website access on port 8080
skopeo inspect docker://registry.lab.com/rhel8/httpd-24 | grep -A5 RepoTags
    # using skopeo to inspect the image
    # show 5 lines after the "RepoTags" match; this lists the available image tags (versions)
    

4. stop and remove the testing container, and run a different version of the image
podman stop testweb
podman rm testweb
podman run -d --name testweb -p 8080:8080 -v ~/webcontent:/var/www:Z registry.lab.com/rhel8/httpd-24:latest
        # using volume mount  -v
        # it will use the persistent storage

podman ps   # verify the container is running
        # verify it is running the required version of the image
curl http://localhost:8080  # test the website access on port 8080
        # verify the web content is the same, since it is served from the persistent storage


#************* managing containers as services
When running a container as a service, it runs under systemd as a user service, without root privileges.
System services normally live in the /etc/systemd/system directory; user services live in ~/.config/systemd/user

1. create a directory in the container host (RHEL server) for the systemd unit files of the container service
mkdir -pv  ~/.config/systemd/user   # create the systemd user unit directory in the user home directory
                        /home/user1/.config/systemd/user

# How to
use the systemctl user option (--user)
podman generate systemd     # generates the systemd unit file for the container   (man podman-generate-systemd)
                # the unit can then be managed with systemctl --user
loginctl enable-linger      # this enables the container service to run without the user being logged in
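    # Example (the username is a placeholder; the full sequence is walked through in the exercise below):
    loginctl enable-linger <user>               # run as root; run it with no argument as the user themselves
    loginctl show-user <user> | grep -i linger  # expect Linger=yes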

** Important Note:
    scale - using OpenShift

#********** Exercise
1. Create the user that will run the container on the container host
useradd svc-cont
echo redhat | passwd --stdin svc-cont   # update the password

2. ssh to container host as svc-cont

3. Create persistent storage and content
mkdir -p ~/.config/containers
cp /tmp/containers-services/registries.conf  ~/.config/containers
mkdir -p ~/webcontent/html/
echo "Testing Only" > ~/.config/containers/index.html   # create an index.html file
ls -ld webcontent/html/     # verify "others" has read/execute right
ls -l webcontent/html/index.html    # verify ohter has read permission

4. create the container and container service (systemd service)
podman login -u admin -p xxxx registry.lab.com
podman run -d --name testweb -p 8080:8080 -v ~/webcontent:/var/www:Z registry.lab.com/rhel8/httpd-01:01 # download the image from the registry and start the container
curl http://localhost:8080  # the request is forwarded to the container
mkdir -p ~/.config/systemd/user/     # holds the container systemd unit files
cd ~/.config/systemd/user
podman generate systemd --name testweb --files --new    # create a systemd unit file from the container
        # --new     the generated unit creates a fresh container when the service starts and removes it when the service stops, instead of reusing an existing container
podman stop testweb
podman rm testweb


Note: It generates the container systemd service file
~/.config/systemd/user/container-<container-name>.service
    [Unit]
        ...

    [Service]
        ...

    [Install]
        ...

5. Verify the container running
podman ps
systemctl --user daemon-reload      # --user   manage the user's own systemd instance, no root privileges required
systemctl --user enable --now container-testweb     # enable and start container-testweb service
podman ps   # verify the container service is running
systemctl --user stop container-testweb
podman ps -a    # list stopped containers; because the unit was generated with --new, the container is removed on stop, so it does not show here
systemctl --user start container-testweb
podman ps   # it shows that container service is now running


6. Enable linger
loginctl enable-linger      # allow the user's services (the container) to start without requiring the user to log in to the container host
loginctl show-user svc-cont
systemctl reboot

7. Verify system reboot and verify container service is running
podman ps

#*** more 
1. install container-tools yum module
sudo yum module install -y container-tools | tail -4

2. ssh to the container host as the podsvc user (container service user)
2.1 # access the image registry
podman login -u admin -p xxxx registry.lab.com

2.2 Inspect the image and version (tag)
skopeo inspect docker://registry.lab.com/rhel8/mariadb-10 | grep -A4 RepoTag

2.3 Create persistent storage
mkdir -pv ~/db_data
chmod 777 ~/db_data

2.4 Create the container 
podman run -d --name testdb -p 13306:3306 -v /home/podsvc/db_data:/var/lib/mysql/data:Z -e MYSQL_USER=test1 -e MYSQL_PASSWORD=xxx -e MYSQL_DATABASE=test -e MYSQL_ROOT_PASSWORD=xxx registry.lab.com/rhel8/mariadb-02:02
        # -e   set environment variables inside the container

2.5 testing
container-review/testdb.sh  # run the test script
        # verify that the script successfully queries the database
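# If the test script is not available, a manual check with the values from the podman run above looks like
# this (it prompts for the MYSQL_PASSWORD value, which is elided here):
mysql -h 127.0.0.1 -P 13306 -u test1 -p test -e 'SHOW DATABASES;'
    # -P 13306   the host port published to the container's port 3306
    # test       the MYSQL_DATABASE created above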


2.6 create container service systemd file
mkdir -p ~/.config/systemd/user/
cd ~/.config/systemd/user/
podman generate systemd --name testdb --files --new  # --files  write the unit file into the current directory
podman stop testdb
podman rm testdb
systemctl --user daemon-reload
systemctl --user enable --now container-testdb.service  # enable and start the container service
loginctl enable-linger  # allow the container's systemd user unit to run without an active login session
podman ps



#************** Review labs
# How to download file using wget
wget http://<URL>/<file-name>   # the downloaded file will not be executable
chmod a+x <filename>    # allow everyone to run / execute

# cron job
crontab -e  # edit cron job
    0 19-21 * * Mon-Fri /home/<path>/<script-name>      # on the hour, 19-21 (hour), Monday to Friday, <path to script>

# swap partition
swapon -s   # same as swapon --summary (deprecated; prefer --show)
swapon --show   # show the swap partitions/files in use, their size, and usage

# create a system group rather than a regular user group (regular groups start from GID 1000; system groups use lower GIDs)
groupadd -r <groupname>
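# To confirm the result (groupname is the one created above):
getent group <groupname>    # the allocated GID should be below 1000, confirming a system group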

# temp file will auto delete after certain time
man -k tmpfiles  # reading   tmpfiles.d (5)
mandb   # if above command has no output / not found, update mandb
vim /etc/tmpfiles.d/<filename>.conf # create the file that will configure the directory to be auto cleanup after certain time
    d /run/<directory-required>  0700  root  root 30s
        # d directory
        # /run/<directory>  directory / path to be cleaned up
        # 0700      permission on the directory, change to what is required
        # root root required owner user and group
        # 30s       files older than 30 seconds are removed when the cleanup runs

systemd-tmpfiles --create /etc/tmpfiles.d/<filename>.conf   # need to use systemd-tmpfiles  to create the directory
ls -ld /run/<directory-required>    # verify the run directory has been created as specified in the config file
    Note:
        any file older than 30s in the /run/<directory-required> directory will be removed when the cleanup runs
systemd-tmpfiles --clean /etc/tmpfiles.d/<filename>.conf    # manually run the tmpfiles clean process, rather than waiting for the cleanup timer


# access control list (ACL)
setfacl -R -m g:<groupname>:rwx /<directory>    # -R  recursive
setfacl -R -m u:<user1>:rw /<directory>
setfacl -R -m d:g:<groupname>:rwx /<directory>  # d: set the default ACL group permission for new files
setfacl -R -m d:u:<user1>:rwx /<directory>  # d: set the default ACL user permission for new files
getfacl /<directory>
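# Typical getfacl output after the commands above (directory, user, and group names are placeholders,
# and the owner/base permissions are only an assumption):
    getfacl /<directory>
    # file: <directory>
    # owner: root
    # group: root
    user::rwx
    user:<user1>:rw-
    group::r-x
    group:<groupname>:rwx
    mask::rwx
    other::r-x
    default:user::rwx
    default:user:<user1>:rwx
    default:group::r-x
    default:group:<groupname>:rwx
    default:mask::rwx
    default:other::r-x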


##*** manage system security
# SELinux
getenforce  # verify SELinux mode
vim /etc/selinux/config     # modify the configuration, and set the required mode
    SELINUX=permissive  # set to permissive mode, or enforcing (enforcing mode)

setenforce 0    # set to permissive mode    setenforce 1  <---- set to enforcing mode
sestatus    # show current mode, and mode from config file

getsebool -a | grep home    # verify SELinux boolean
setsebool -P use_nfs_home_dirs on   # -P makes the change persistent across reboots

systemctl status httpd -l --no-pager

sealert -l <long id> | less -X      # -X  capital X  <--------- do not clear the screen after exiting less

^start^status   # <------------------- replace "start" in the previous command with "status" and rerun the command
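    # Example:
    systemctl start httpd
    ^start^status       # re-runs the previous command as: systemctl status httpd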

chown -R containers: /srv/web       # -R    recursively change /srv/web and its subfolders, setting the "containers" user as the owner
                        # and "containers" as the owning group

podman run -d --name web -p 8888:8080 -v /srv/web:/var/www:Z -e HTTPD_MPM=event registry.lab.com/rhel8/httpd-02:02
        # -p 8888:8080   port forwarding host port 8888 to 8080 on the container
        # -v /srv/web:/var/www      volume mount container host /srv/web to container /var/www
        # :Z    capital Z       relabel the content with the SELinux container_file_t type
        # -e HTTPD_MPM=event        Set environment variable (-e)  HTTPD_MPM to value "event"
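
# Once the container is up, the published port can be checked from the container host:
podman ps                       # confirm the web container is running
curl http://localhost:8888      # the request is forwarded to port 8080 inside the container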