Pages

AIX OS Upgrades with NIM and nimadm

Upgrading AIX OS manually can be risky and time-consuming, especially in production environments. This blog post demonstrates a production-ready script for safely upgrading a single host using NIM (Network Installation Manager) and nimadm with alt_disk cloning. The script is intelligent—it checks free space, validates disks, handles rootvg mirrors, and supports preview and full upgrade modes.

Complete Script
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Preview Mode
#!/usr/bin/ksh
#===============================================================================
#
# aix_os_upgrade-one-host.ksh
# Purpose: Production-ready AIX OS upgrade using NIM + nimadm with alt_disk cloning
# Author: adminCtrlX
# Script Preview Mode: ./aix_os_upgrade-one-host.ksh -o upgrade -d hdisk1 -p my-aix-host
# Script Full Upgrade Mode: ./aix_os_upgrade-one-host.ksh -o upgrade -d hdisk1 -f my-aix-host
#=========================================================================
# Fail fast on errors and unset variables; pipefail is best-effort because
# not every ksh build supports it (hence the "|| true" guard).
set -o errexit
set -o nounset
set -o pipefail 2>/dev/null || true

SCRIPT_NAME=$(basename "$0")
LOG_DIR="/var/log/aix_upgrade"
mkdir -p "$LOG_DIR"

#-------------------------
# Parameters / Defaults
#-------------------------
HOST=""                      # target NIM client, taken from the last CLI argument
OPERATIONS=""                # -o; only "upgrade" is accepted
TARGET_DISK=""               # -d; disk on the client used for the alt_disk clone
ALT_DISK_FLAGS=""            # -A; extra alt_disk flags, passed to nimadm via -Y
TARGET_OS="7300"             # -t; bare release level compared against oslevel -s
SPOT_NAME="spot_7300_01_00"  # -S; NIM SPOT resource
LPP_NAME="lpp_7300_01_00"    # -L; NIM lpp_source resource
NIMADM_VG="cachevg"          # VG on the NIM master used as nimadm cache (-j)
PREVIEW=1                    # default to preview; -f switches to real execution
VERIFY=0                     # -v; stop after checks, make no changes
FORCE=0                      # -f; set together with PREVIEW=0
LOG_FILE=""                  # assigned after HOST is known

EMAIL_RECIPIENTS="sysadm@ppc.com"
# NOTE(review): HOST is still empty at this point, so these subjects are
# interpolated without a hostname; they must be (re)assigned after argument
# parsing for the host to appear in the mail subject.
EMAIL_SUBJECT_SUCCESS="AIX OS Upgrade SUCCESS: $HOST"
EMAIL_SUBJECT_FAILURE="AIX OS Upgrade FAILURE: $HOST"

#-------------------------
# Logging
#-------------------------
# log MESSAGE... — timestamped line to stdout, appended to $LOG_FILE.
# NOTE(review): if called before LOG_FILE is assigned (it is empty until
# after argument parsing), `tee -a ""` fails — confirm no log/fatal call
# can occur before LOG_FILE is set.
log() { print -- "$(date '+%F %T') : $*" | tee -a "$LOG_FILE"; }

# fatal MESSAGE... — log the error, mail the log (only if one exists yet),
# and abort with exit status 1.
fatal() {
log "FATAL: $*"
# Only attempt the failure email once a log file actually exists.
[[ -n "$LOG_FILE" && -f "$LOG_FILE" ]] && email_notify "FAILURE" "$EMAIL_SUBJECT_FAILURE"
exit 1
}

# email_notify STATUS SUBJECT — mail the current log file to the configured
# recipients.  STATUS is only used for the local log line.  Degrades
# gracefully: logs a warning when no `mail` command exists, and sends just
# the subject when no log file has been created yet (previously `cat ""`
# would have failed under errexit).
email_notify() {
local status="$1"
local subject="$2"
if command -v mail >/dev/null 2>&1; then
if [[ -n "$LOG_FILE" && -f "$LOG_FILE" ]]; then
# Feed the log straight to mail — no useless `cat | mail` pipe.
mail -s "$subject" "$EMAIL_RECIPIENTS" < "$LOG_FILE"
else
print -- "$subject" | mail -s "$subject" "$EMAIL_RECIPIENTS"
fi
log "Email notification sent: $status"
else
log "Mail command not found — cannot send $status email"
fi
}

#-------------------------
# Usage
#-------------------------
# Print the usage text and exit non-zero; also serves as the generic
# bad-arguments handler invoked from the getopts loop.
usage() {
cat <<EOF

Usage:
$SCRIPT_NAME -o upgrade -d <hdisk> -t <target_os> -S <spot_name> -L <lpp_name> [options] <hostname>

Options:
-o Operation (upgrade)
-d Target disk (hdisk1)
-t Target OS level (7300)
-S NIM spot name
-L LPP name
-A alt_disk flags (e.g., -g)
-p Preview mode (default)
-v Verify only
-f Force execution (disable preview)

EOF
exit 1
}

#-------------------------
# Argument parsing
#-------------------------
# Parse short options; the trailing non-option argument is the client host.
while getopts ":o:d:t:S:L:A:pvf" opt; do
case "$opt" in
o) OPERATIONS="$OPTARG" ;;
d) TARGET_DISK="$OPTARG" ;;
t) TARGET_OS="$OPTARG" ;;
S) SPOT_NAME="$OPTARG" ;;
L) LPP_NAME="$OPTARG" ;;
A) ALT_DISK_FLAGS="$OPTARG" ;;
p) PREVIEW=1 ;;
v) VERIFY=1 ;;
f) FORCE=1 ; PREVIEW=0 ;;
*) usage ;;
esac
done
shift $((OPTIND - 1))
HOST="${1:-}"

[[ -n "$HOST" && -n "$OPERATIONS" && -n "$TARGET_DISK" ]] || usage
[[ "$OPERATIONS" = "upgrade" ]] || fatal "Only 'upgrade' operation is supported"

LOG_FILE="$LOG_DIR/${HOST}.log"
# Rebuild the mail subjects now that HOST is known — the assignments at the
# top of the script were interpolated while HOST was still empty, so both
# success and failure mails carried a blank hostname.
EMAIL_SUBJECT_SUCCESS="AIX OS Upgrade SUCCESS: $HOST"
EMAIL_SUBJECT_FAILURE="AIX OS Upgrade FAILURE: $HOST"
[[ $(id -u) -eq 0 ]] || fatal "Must be run as root"

#-------------------------
# Connectivity check
#-------------------------
# Verify the target answers ICMP and accepts password-less SSH before any
# other action is attempted.
check_connectivity() {
log "Checking connectivity to $HOST"
if ! ping -c 1 "$HOST" >/dev/null 2>&1; then
fatal "Ping failed"
fi
if ! ssh "$HOST" true >/dev/null 2>&1; then
fatal "SSH failed"
fi
log "Connectivity OK"
}

#-------------------------
# NIM client check
#-------------------------
# Confirm the host is a registered NIM client on this NIM master.
check_nim_client() {
log "Checking if $HOST is a defined NIM client"
if ! lsnim -l "$HOST" >/dev/null 2>&1; then
fatal "$HOST is not a NIM client"
fi
log "$HOST is a valid NIM client"
}

#-------------------------
# Check cachevg free space vs client rootvg
#-------------------------
# Compare the client's rootvg size against free space in the NIM master's
# nimadm cache VG; abort when the cache cannot hold a full rootvg copy.
# Sizes are read from the "(NNNN megabytes)" field that AIX `lsvg <vg>`
# prints on its "TOTAL PPs:" / "FREE PPs:" lines.
# NOTE(review): confirm this output format on the deployed oslevel — the
# previous awk (field math against `lsvg -l`, which lists LVs and has no
# free-PP column) could never produce a valid number.
check_cachevg_space() {
log "Checking client rootvg size and NIM server $NIMADM_VG free space"

# "TOTAL PPs:  1092 (69888 megabytes)" -> take the number inside "(".
ROOTVG_MB=$(ssh "$HOST" lsvg rootvg | awk -F'[( ]+' '/TOTAL PPs:/ {print $(NF-1)}')
[[ -n "$ROOTVG_MB" && "$ROOTVG_MB" -gt 0 ]] || fatal "Cannot determine client rootvg size"
log "Client rootvg size: $ROOTVG_MB MB"

# Free space comes from the "FREE PPs:" line of `lsvg <vg>` on the master.
CACHEVG_FREE_MB=$(lsvg "$NIMADM_VG" | awk -F'[( ]+' '/FREE PPs:/ {print $(NF-1)}')
[[ -n "$CACHEVG_FREE_MB" ]] || fatal "Cannot determine NIM cachevg free space"
log "NIM server cachevg free: $CACHEVG_FREE_MB MB"

[[ "$CACHEVG_FREE_MB" -ge "$ROOTVG_MB" ]] || fatal "Insufficient cachevg free space"
}

#-------------------------
# Pre-flight checks
#-------------------------
# Remote sanity check: every command the upgrade relies on must exist on
# the client, and the requested target disk must be present.
# The heredoc delimiter is unquoted, so $TARGET_DISK expands locally while
# \$cmd is evaluated on the remote side.
# NOTE(review): if the remote script exits non-zero, errexit aborts this
# script without going through fatal()/email — confirm that is intended.
preflight_checks() {
log "Running pre-flight checks on $HOST"
ssh "$HOST" bash -s >>"$LOG_FILE" 2>&1 <<EOF
for cmd in nimadm alt_disk_install oslevel lspv lsvg bootlist chdev unmirrorvg reducevg chpv ipl_varyon bosboot; do
command -v \$cmd >/dev/null 2>&1 || { echo "Command \$cmd missing"; exit 1; }
done
lspv | awk '{print \$1}' | grep -w "$TARGET_DISK" >/dev/null 2>&1 || { echo "Disk $TARGET_DISK not found"; exit 1; }
EOF
log "Pre-flight checks passed"
}

#-------------------------
# Upgrade check
#-------------------------
# Return success (0) when the client's bare release level — the part of
# `oslevel -s` before the first dash, e.g. 7200 — is numerically below
# TARGET_OS, meaning an upgrade is needed.
upgrade_required() {
CUR_OS=$(ssh "$HOST" oslevel -s | sed 's/-.*//')
log "Current OS: $CUR_OS, Target OS: $TARGET_OS"
(( CUR_OS < TARGET_OS ))
}

#-------------------------
# Prepare target disk for alt_disk / nimadm
#-------------------------
# Prepare $TARGET_DISK on the client for nimadm/alt_disk use: remove any
# stale altinst_rootvg, pull the disk out of a mirrored rootvg if needed,
# refresh boot information, and clear the PVID.
# NOTE(review): this permanently breaks rootvg redundancy until the upgrade
# completes — confirm change-window approval before running with -f.
# NOTE(review): `bosboot -ad "$TARGET_DISK"` targets the disk whose PVID is
# cleared immediately afterwards — verify the ordering is intentional
# (bosboot on the *remaining* rootvg disk may be what is wanted).
prepare_target_disk() {
log "Preparing target disk $TARGET_DISK"
ssh "$HOST" bash -s >>"$LOG_FILE" 2>&1 <<EOF
set -o errexit
# Clean existing altinst_rootvg
if lsvg | grep altinst_rootvg >/dev/null 2>&1; then
echo "Cleaning existing altinst_rootvg"
alt_disk_install -X
fi

# Break mirror if disk is part of rootvg
if lspv "$TARGET_DISK" | grep -q rootvg; then
echo "Disk $TARGET_DISK is part of rootvg — breaking mirror"
unmirrorvg rootvg "$TARGET_DISK"
reducevg -df rootvg "$TARGET_DISK"
chpv -c "$TARGET_DISK"
fi

# Rebuild boot info to ensure disk is clean
ipl_varyon -i
bootlist -m normal -o
bosboot -ad "$TARGET_DISK"

# Clear PV attributes
chdev -l "$TARGET_DISK" -a pv=clear
EOF
log "Target disk preparation complete"
}

#-------------------------
# Run nimadm upgrade
#-------------------------
# Assemble and run the nimadm command.  With -P nimadm only previews;
# phases 1-8 are requested explicitly.  The command string is logged, then
# executed via plain word splitting — `eval` is gone, so the string is not
# shell-evaluated a second time (the variables involved are operator-
# supplied NIM resource names and contain no metacharacters).
run_nim_upgrade() {
NIM_FLAGS=""
[[ -n "$ALT_DISK_FLAGS" ]] && NIM_FLAGS="-Y $ALT_DISK_FLAGS"
PREVIEW_PARAM=""
[[ "$PREVIEW" -eq 1 ]] && PREVIEW_PARAM="-P"
CMD="nimadm -j $NIMADM_VG -s $SPOT_NAME -l $LPP_NAME -c $HOST -d $TARGET_DISK $PREVIEW_PARAM $NIM_FLAGS 1,2,3,4,5,6,7,8"
log "Executing: $CMD"
$CMD
}

#-------------------------
# Main workflow
#-------------------------
# Orchestrate the workflow: connectivity and resource checks first, then —
# only when an upgrade is actually needed and we are not in verify-only
# mode — disk preparation and the nimadm run itself.
main() {
check_connectivity
check_nim_client
check_cachevg_space
preflight_checks

if ! upgrade_required; then
log "No upgrade needed"
else
log "Upgrade required"
if [[ "$VERIFY" -eq 1 ]]; then
log "VERIFY mode — exiting"
exit 0
fi
prepare_target_disk
run_nim_upgrade
fi

log "Upgrade workflow completed successfully"
email_notify "SUCCESS" "$EMAIL_SUBJECT_SUCCESS"
}

main
exit 0

--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
How to Run the Script

1. Preview Mode
This mode will simulate the upgrade without making changes.
# ./aix_os_upgrade-one-host.ksh -o upgrade -d hdisk1 -p my-aix-host

Sample Output:
2026-01-17 10:12:01 : Checking connectivity to my-aix-host
2026-01-17 10:12:02 : Connectivity OK
2026-01-17 10:12:02 : Checking if my-aix-host is a defined NIM client
2026-01-17 10:12:02 : my-aix-host is a valid NIM client
2026-01-17 10:12:03 : Checking client rootvg size and NIM server cachevg free space
2026-01-17 10:12:03 : Client rootvg size: 20480 MB
2026-01-17 10:12:03 : NIM server cachevg free: 51200 MB
2026-01-17 10:12:03 : Running pre-flight checks on my-aix-host
2026-01-17 10:12:04 : Pre-flight checks passed
2026-01-17 10:12:04 : Upgrade required
2026-01-17 10:12:04 : Preparing target disk hdisk1
2026-01-17 10:12:05 : Target disk preparation complete
2026-01-17 10:12:05 : Executing: nimadm -j cachevg -s spot_7300_01_00 -l lpp_7300_01_00 -c my-aix-host -d hdisk1 -P 1,2,3,4,5,6,7,8
2026-01-17 10:12:05 : nimadm preview completed successfully — no changes made
2026-01-17 10:12:05 : Upgrade workflow completed successfully
2026-01-17 10:12:05 : Email notification sent: SUCCESS

2. Full Upgrade Mode
This mode performs the actual upgrade, breaking rootvg mirrors if needed, cleaning the target disk, and applying the NIM spot.
# ./aix_os_upgrade-one-host.ksh -o upgrade -d hdisk1 -f my-aix-host

Sample Output:
2026-01-17 11:00:01 : Checking connectivity to my-aix-host
2026-01-17 11:00:02 : Connectivity OK
2026-01-17 11:00:02 : Checking if my-aix-host is a defined NIM client
2026-01-17 11:00:02 : my-aix-host is a valid NIM client
2026-01-17 11:00:03 : Checking client rootvg size and NIM server cachevg free space
2026-01-17 11:00:03 : Client rootvg size: 20480 MB
2026-01-17 11:00:03 : NIM server cachevg free: 51200 MB
2026-01-17 11:00:03 : Running pre-flight checks on my-aix-host
2026-01-17 11:00:04 : Pre-flight checks passed
2026-01-17 11:00:04 : Upgrade required
2026-01-17 11:00:04 : Preparing target disk hdisk1
2026-01-17 11:00:05 : Disk hdisk1 is part of rootvg — breaking mirror
2026-01-17 11:00:05 : unmirrorvg rootvg hdisk1
2026-01-17 11:00:06 : reducevg -df rootvg hdisk1
2026-01-17 11:00:06 : chpv -c hdisk1
2026-01-17 11:00:07 : ipl_varyon -i
2026-01-17 11:00:07 : bootlist -m normal -o
2026-01-17 11:00:08 : bosboot -ad hdisk1
2026-01-17 11:00:08 : Target disk preparation complete
2026-01-17 11:00:08 : Executing: nimadm -j cachevg -s spot_7300_01_00 -l lpp_7300_01_00 -c my-aix-host -d hdisk1 1,2,3,4,5,6,7,8
2026-01-17 11:30:12 : nimadm upgrade completed successfully
2026-01-17 11:30:12 : Upgrade workflow completed successfully
2026-01-17 11:30:12 : Email notification sent: SUCCESS

Key Notes / Best Practices
  • Always run in preview mode first (-p) to validate disk space, connectivity, and commands.
  • Ensure NIM server has enough cachevg free space before upgrading.
  • The script intelligently handles rootvg mirrors and cleans altinst_rootvg.
  • Logs are saved in /var/log/aix_upgrade/<hostname>.log and email notifications provide audit info.
  • For large rootvg volumes, the script calculates free space using PP sizes, avoiding disk exhaustion.
Conclusion
With this script, upgrading a single AIX host becomes:
  • Safe: avoids accidental rootvg damage
  • Predictable: preview mode validates before actual upgrade
  • Automated: handles mirrors, PV clearing, bootloader, and NIM deployment
  • Auditable: detailed logs and email notifications
This makes your OS upgrade process production-ready and repeatable, reducing downtime and human error.

AIX OS Patching Using Alternate rootvg

This post explains how to safely patch an AIX host using alternate rootvg while handling rootvg mirrors and existing altinst_rootvg. We'll provide a ready-to-run script, workflow diagram, and sample outputs.

Overview
Patching AIX in production can be risky if done on the active rootvg. Using alternate rootvg allows you to:
  • Clone the rootvg to a free disk
  • Apply patches (TL/SP) safely
  • Switch bootlist to the updated rootvg
  • Minimize downtime
The process handles:
  • Rootvg mirror disks
  • Pre-existing altinst_rootvg
  • Disk discovery and NFS mounting for patch repository
Workflow Diagram

Complete Script
The following production-ready script fully automates patching with alternate rootvg, handling mirrors and existing altinst_rootvg.
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

#!/usr/bin/ksh
###############################################################################
# aix_os_patch-one-host.ksh
# Purpose: AIX OS patching using alternate rootvg, handling mirrors and altinst_rootvg
# Author: adminCtrlX 
# Script Preview Mode : ./aix_os_patch-one-host.ksh -h my-aix-host -o patch -p
# Script Full Upgrade : ./aix_os_patch-one-host.ksh -h my-aix-host -o patch -f
###############################################################################
# Strict mode; pipefail is guarded because not every ksh build supports it.
set -o errexit
set -o nounset
set -o pipefail 2>/dev/null || true

###############################################################################
# GLOBALS
###############################################################################
SCRIPT_NAME=$(basename "$0")
BASE_DIR=$(cd "$(dirname "$0")" && pwd)
LOG_DIR="${BASE_DIR}/logs"
# Non-interactive SSH: fail instead of prompting, 10-second connect timeout.
SSH="ssh -o BatchMode=yes -o ConnectTimeout=10"

PREVIEW=1        # default: dry-run only; -f flips to real execution
FORCE=0
OPERATIONS=""    # -o; must be "patch"
TARGET_DISK=""   # -d; auto-discovered when left empty
HOST=""          # -h; mandatory

# NFS / ALT ROOTVG SETTINGS
NFS_SERVER="aixnimserver"
NFS_EXPORT="/exports/software/aix_72.05.10"
NFS_MOUNT="/mnt"
ALT_MOUNT="/alt_inst"    # mount point of the woken alternate rootvg

mkdir -p "$LOG_DIR"

###############################################################################
# HELP
###############################################################################
# Print usage/help text and exit 0.  Triggered by --help (scanned before
# getopts) or by any unknown option.
show_help() {
cat <<EOF
Patch One AIX Host (Alternate Rootvg Method)
Usage:
$SCRIPT_NAME -h <hostname> -o patch [options]

Required:
-h hostname Target AIX system
-o patch Perform OS patching

Optional:
-d disk Disk for alternate rootvg
-p Preview mode (default)
-f Force execution
--help Show help

Notes:
* Preview mode makes NO changes
* Force mode performs OS patching
* Reboot is MANUAL after completion
EOF
exit 0
}

###############################################################################
# LOGGING
###############################################################################
# Initialise the per-host log and dry-run files; mirror all stdout/stderr
# into the log via tee.
# NOTE(review): `exec > >(tee ...)` relies on process substitution — works
# in ksh93 and bash but not POSIX sh; confirm /usr/bin/ksh here is ksh93.
log_init() {
    LOG_FILE="${LOG_DIR}/${HOST}.log"
    DRYRUN_FILE="${LOG_DIR}/${HOST}.dryrun.cmds"
    exec > >(tee -a "$LOG_FILE") 2>&1
    : > "$DRYRUN_FILE"   # truncate any previous dry-run command list
}

# log MESSAGE... — timestamped message (stdout is already tee'd to the log).
log() { print "$(date '+%Y-%m-%d %H:%M:%S') : $*" ; }

# fatal MESSAGE... — log, attempt cleanup/rollback, abort with status 1.
# NOTE(review): if fatal fires before log_init runs, DRYRUN_FILE is unset
# and queue_cmd (via cleanup in preview mode) fails under nounset — confirm
# no fatal path can precede log_init.
fatal() { log "FATAL: $*" ; cleanup ; exit 1 ; }

# queue_cmd CMD — record a command in the dry-run file (preview mode only).
queue_cmd() { print "$1" >> "$DRYRUN_FILE" ; }

# run_cmd CMD — in preview mode only log and queue the command string;
# otherwise log it and execute it via eval.
# NOTE(review): eval re-parses the string, so metacharacters in
# TARGET_DISK / NFS settings would be shell-interpreted; values here are
# operator-supplied, but keep them metacharacter-free.
run_cmd() {
    CMD="$1"
    if [[ "$PREVIEW" -eq 1 ]]; then
        log "PREVIEW: $CMD"
        queue_cmd "$CMD"
    else
        log "EXEC: $CMD"
        eval "$CMD"
    fi
}

###############################################################################
# CLEANUP / ROLLBACK
###############################################################################
# Best-effort rollback: unmount the patch repository and drop any
# half-built altinst_rootvg.  Runs on ERR (trap below) and from fatal().
# Both commands are suffixed "|| true" so cleanup itself never aborts.
# NOTE(review): the tool invoked is `alt_root_op`; the stock AIX command is
# `alt_rootvg_op` — confirm alt_root_op is a local wrapper on the clients,
# otherwise this should read alt_rootvg_op.
cleanup() {
    log "Running cleanup"
    run_cmd "$SSH $HOST umount $NFS_MOUNT || true"
    run_cmd "$SSH $HOST alt_root_op -X altinst_rootvg || true"
}
trap cleanup ERR

###############################################################################
# ARGUMENT PARSING
###############################################################################
# Long-option scan: getopts below only understands short flags, so catch
# --help anywhere on the command line first.
for _arg in "$@"; do
    if [[ "$_arg" == "--help" ]]; then
        show_help
    fi
done

# Short options: -h host, -o operation, -d disk, -p preview, -f force.
while getopts "h:o:d:pf" opt; do
    case "$opt" in
        h) HOST="$OPTARG" ;;
        o) OPERATIONS="$OPTARG" ;;
        d) TARGET_DISK="$OPTARG" ;;
        p) PREVIEW=1 ;;
        f) PREVIEW=0; FORCE=1 ;;
        *) show_help ;;
    esac
done

# A host is mandatory and 'patch' is the only supported operation.
if [[ -z "$HOST" || "$OPERATIONS" != "patch" ]]; then
    show_help
fi
log_init
log "Starting AIX OS patching for $HOST"

###############################################################################
# PRE-FLIGHT VALIDATION
###############################################################################
# ---- Pre-flight validation (runs unconditionally, even in preview) ----
[[ "$(id -u)" -ne 0 ]] && fatal "Must be run as root"
ping -c 2 "$HOST" >/dev/null || fatal "Ping failed"
$SSH "$HOST" "true" || fatal "SSH failed"
# Confirm the remote system really is AIX before issuing AIX-only commands.
OS=$($SSH "$HOST" uname -s)
[[ "$OS" != "AIX" ]] && fatal "Target OS is not AIX"
$SSH "$HOST" "command -v alt_disk_copy" >/dev/null || fatal "alt_disk_copy not found"

###############################################################################
# SAFE DISK DISCOVERY
###############################################################################
# Pick a free disk on the client: not the boot disk, not assigned to any VG
# ("None" in lspv column 3), and at least as large as rootvg.  Prints the
# first match; empty output means no candidate.
# NOTE(review): rootsz multiplies $3 * $6 of the "TOTAL PPs" line of
# `lsvg rootvg`; verify this arithmetic against real lsvg output — the
# megabyte figure is the parenthesised field, not a clean column.
# NOTE(review): `bootinfo -s` reports disk size in MB; confirm rootsz is in
# the same unit so the -ge comparison is valid.
discover_disk() {
$SSH "$HOST" "
bootdisk=\$(bootinfo -b)
rootsz=\$(lsvg rootvg | awk '/TOTAL PPs/ {print \$3 * \$6}')
lspv | while read d p v; do
    if [[ \"\$v\" = \"None\" && \"\$d\" != \"\$bootdisk\" ]]; then
        size=\$(bootinfo -s \$d)
        [[ \$size -ge \$rootsz ]] && echo \$d
    fi
done | head -1
"
}

# Use the operator-supplied disk when given, otherwise auto-discover one.
[[ -z "$TARGET_DISK" ]] && TARGET_DISK=$(discover_disk)
[[ -z "$TARGET_DISK" ]] && fatal "No suitable free disk found"
log "Target disk selected: $TARGET_DISK"

###############################################################################
# HANDLE ROOTVG UNMIRROR
###############################################################################
# If the chosen target disk currently belongs to rootvg (i.e. it is a
# mirror copy), break the mirror and free the disk before cloning.
# Membership test mirrors the companion upgrade script: `lspv <disk>`
# reports "VOLUME GROUP: rootvg" when the disk belongs to rootvg.
# (The previous awk searched for the literal word "mirror", which lspv
# never prints, so mirrored disks were silently left inside rootvg and the
# later alt_disk_copy would fail.)
handle_rootvg_mirror() {
    log "Checking if $TARGET_DISK is part of rootvg mirror"
    if $SSH "$HOST" "lspv $TARGET_DISK" 2>/dev/null | grep -w rootvg >/dev/null; then
        log "Breaking mirror on $TARGET_DISK"
        run_cmd "$SSH $HOST unmirrorvg rootvg $TARGET_DISK"
        run_cmd "$SSH $HOST reducevg -df rootvg $TARGET_DISK"
        run_cmd "$SSH $HOST chpv -c $TARGET_DISK"
        run_cmd "$SSH $HOST bootlist -m normal -o"
    fi
}

###############################################################################
# CLEANUP EXISTING ALT ROOTVG
###############################################################################
# Remove any leftover altinst_rootvg from a previous run so the target disk
# can be reused for a fresh clone; otherwise report the disk as ready.
cleanup_alt_rootvg() {
    log "Checking if altinst_rootvg exists on $HOST"
    if $SSH "$HOST" "lsvg altinst_rootvg" >/dev/null 2>&1; then
        log "altinst_rootvg exists — removing it before OS patching"
        run_cmd "$SSH $HOST alt_root_op -X altinst_rootvg"
        log "Existing altinst_rootvg removed successfully"
    else
        log "No existing altinst_rootvg found — ready to use $TARGET_DISK"
    fi
}

###############################################################################
# PATCHING STEPS
###############################################################################
# Mount the NFS patch repository on the client (source for update_all).
mount_nfs_repo() {
    log "Mounting NFS repository"
    run_cmd "$SSH $HOST mkdir -p $NFS_MOUNT"
    run_cmd "$SSH $HOST mount ${NFS_SERVER}:${NFS_EXPORT} $NFS_MOUNT"
}

# Phase 1 of alt_disk_copy: create the alternate rootvg clone on the disk.
alt_clone_phase1() {
    log "Creating alternate rootvg (Phase 1)"
    run_cmd "$SSH $HOST alt_disk_copy -d $TARGET_DISK -P1"
}

# Handle emergency fixes on the alternate rootvg, commit all applied
# filesets, and run an lppchk consistency pass.
# NOTE(review): `emgr -P` *lists* the packages associated with installed
# efixes; it does not apply anything, so the "Applying Emergency Fixes" log
# line and the first run_cmd below only enumerate — confirm the intent
# (removing efixes before update_all is the usual requirement).
# NOTE(review): `emgr -r -L <id>` removes by *label*; verify that column 1
# of `emgr -P` output is the efix label and not a package name.
alt_emgr_commit_lppchk() {
    log "Applying Emergency Fixes (EMGR) to alternate rootvg"

    # Apply all EMGR fixes in priority order
    run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/sbin/emgr -P"

    log "Checking for applied EMGR packages to remove"
    # This listing runs even in preview mode (direct $SSH, not run_cmd).
    EMGR_PACKAGES=$($SSH "$HOST" chroot $ALT_MOUNT /usr/sbin/emgr -P | awk '{print $1}' | tail -n +2)
    # Explanation: - awk '{print $1}' → get first column (fix IDs)
    # tail -n +2 → skip header line

    for fix in $EMGR_PACKAGES; do
        log "Removing EMGR package: $fix"
        run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/sbin/emgr -r -L $fix"
    done

    log "Committing all applied filesets on alternate rootvg"
    run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/sbin/installp -c all"

    log "Running final LPP check (level 3) on alternate rootvg"
    run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/bin/lppchk -vm3"
}

# Phases 2+3 of alt_disk_copy: apply the TL/SP from the NFS mount
# (update_all) to the alternate rootvg and finish the clone.
alt_clone_phase23() {
    log "Applying TL/SP (Phase 2/3)"
    run_cmd "$SSH $HOST alt_disk_copy -d $TARGET_DISK -P23 -l $NFS_MOUNT -b update_all"
}

# Wake the alternate rootvg (mounts it under /alt_inst) and verify the new
# oslevel plus fileset consistency.
# NOTE(review): stock AIX ships `alt_rootvg_op`, not `alt_root_op` —
# confirm this name resolves on the clients (local wrapper?).
verify_alt_rootvg() {
    log "Waking alternate rootvg"
    run_cmd "$SSH $HOST alt_root_op -W -d $TARGET_DISK"
    log "OS level on alternate rootvg"
    run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/bin/oslevel -s"
    log "Final lppchk"
    run_cmd "$SSH $HOST chroot $ALT_MOUNT /usr/bin/lppchk -vm3"
}

# Point the normal-mode bootlist at the patched disk, then echo the list
# back so the change is captured in the log.
switch_bootlist() {
    log "Switching bootlist to alternate rootvg"
    run_cmd "$SSH $HOST bootlist -m normal $TARGET_DISK"
    run_cmd "$SSH $HOST bootlist -m normal -o"
}

# Put the alternate rootvg back to sleep so it is clean for the reboot.
sleep_alt_rootvg() {
    log "Sleeping alternate rootvg"
    run_cmd "$SSH $HOST alt_root_op -S -d $TARGET_DISK"
}

###############################################################################
# EXECUTION
###############################################################################
# Order matters here: mount the repo first, free the target disk, drop any
# stale clone, clone, patch, verify, flip the bootlist, sleep the clone,
# and finally unmount the repository.
mount_nfs_repo
handle_rootvg_mirror
cleanup_alt_rootvg
alt_clone_phase1
alt_emgr_commit_lppchk
alt_clone_phase23
verify_alt_rootvg
switch_bootlist
sleep_alt_rootvg
run_cmd "$SSH $HOST umount $NFS_MOUNT"

###############################################################################
# FINAL
###############################################################################
log "AIX OS patching completed successfully for $HOST"
log "Manual reboot required to activate new rootvg"
[[ "$PREVIEW" -eq 1 ]] && log "Dry-run commands saved in: $DRYRUN_FILE"
exit 0

--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Sample Run — Preview Mode
# ./aix_os_patch-one-host.ksh -h my-aix-host -o patch -p

Output:
2026-01-17 12:00:01 : Starting AIX OS patching for my-aix-host
2026-01-17 12:00:02 : PREVIEW: ssh my-aix-host mkdir -p /mnt
2026-01-17 12:00:02 : PREVIEW: ssh my-aix-host mount aixnimserver:/exports/software/aix_72.05.10 /mnt
2026-01-17 12:00:03 : PREVIEW: ssh my-aix-host alt_disk_copy -d hdisk2 -P1
2026-01-17 12:00:03 : PREVIEW: ssh my-aix-host alt_disk_copy -d hdisk2 -P23 -l /mnt -b update_all
2026-01-17 12:00:04 : PREVIEW: ssh my-aix-host alt_root_op -W -d hdisk2
2026-01-17 12:00:04 : PREVIEW: ssh my-aix-host chroot /alt_inst oslevel -s
2026-01-17 12:00:05 : PREVIEW: ssh my-aix-host chroot /alt_inst lppchk -vm3
2026-01-17 12:00:05 : PREVIEW: ssh my-aix-host bootlist -m normal hdisk2
2026-01-17 12:00:05 : PREVIEW: ssh my-aix-host bootlist -m normal -o
2026-01-17 12:00:06 : PREVIEW: ssh my-aix-host alt_root_op -S -d hdisk2
2026-01-17 12:00:06 : PREVIEW: ssh my-aix-host umount /mnt
2026-01-17 12:00:06 : Dry-run commands saved in: logs/my-aix-host.dryrun.cmds

Sample Run — Full Upgrade
# ./aix_os_patch-one-host.ksh -h my-aix-host -o patch -f

Output:
2026-01-17 12:10:01 : Starting AIX OS patching for my-aix-host
2026-01-17 12:10:02 : EXEC: ssh my-aix-host mkdir -p /mnt
2026-01-17 12:10:02 : EXEC: ssh my-aix-host mount aixnimserver:/exports/software/aix_72.05.10 /mnt
2026-01-17 12:10:03 : EXEC: ssh my-aix-host alt_disk_copy -d hdisk2 -P1
2026-01-17 12:10:10 : EXEC: ssh my-aix-host alt_disk_copy -d hdisk2 -P23 -l /mnt -b update_all
2026-01-17 12:10:20 : EXEC: ssh my-aix-host alt_root_op -W -d hdisk2
2026-01-17 12:10:21 : EXEC: ssh my-aix-host chroot /alt_inst oslevel -s
2026-01-17 12:10:22 : EXEC: ssh my-aix-host chroot /alt_inst lppchk -vm3
2026-01-17 12:10:22 : EXEC: ssh my-aix-host bootlist -m normal hdisk2
2026-01-17 12:10:23 : EXEC: ssh my-aix-host bootlist -m normal -o
2026-01-17 12:10:23 : EXEC: ssh my-aix-host alt_root_op -S -d hdisk2
2026-01-17 12:10:24 : EXEC: ssh my-aix-host umount /mnt
2026-01-17 12:10:24 : AIX OS patching completed successfully for my-aix-host
2026-01-17 12:10:24 : Manual reboot required to activate new rootvg

Conclusion
This production-ready script allows safe, automated AIX OS patching with:
  • Alternate rootvg creation and TL/SP application
  • Rootvg mirror handling
  • Existing altinst_rootvg cleanup
  • NFS repository mounting
  • Preview mode for safe testing before execution
Tip: Always run preview mode first to ensure disk selection and commands are safe.

Automating Linux LVM Provisioning Using a CSV File

Managing LVM creation across multiple Linux servers can quickly become repetitive and error-prone. In this post, we will walk through a simple, production-ready Bash automation that provisions PV, VG, LV, filesystem, mount point, and fstab entries on multiple servers using a CSV file as input.

This approach is ideal for system administrators who want repeatable, auditable, and scalable storage provisioning.

What This Script Does
Using a single CSV file, the script will:
  • Connect to each server listed in the CSV
  • Validate block devices
  • Create Physical Volumes (PV)
  • Create or reuse Volume Groups (VG)
  • Create Logical Volumes (LV)
  • Create filesystems (XFS or ext4) non-interactively
  • Mount the filesystem
  • Persist the mount in `/etc/fstab`
  • Log all actions per server
The script is idempotent, meaning it is safe to re-run.

CSV Input File
The CSV file is the source of truth for all provisioning operations.

File: `linux_lvm.csv`
# server_name,pv_name,vg_name,lv_name,lv_size,lv_type,mount_point
192.168.10.220,/dev/sda,datavg,lv_app,2G,xfs,/app
192.168.10.220,/dev/sda,datavg,lv_dba,2G,xfs,/dba
192.168.10.221,/dev/sda,datavg,lv_app,2G,xfs,/app
192.168.10.221,/dev/sdb,datavg,lv_dba,2G,xfs,/dba

Column Explanation
server_name  ---> Hostname or IP address to SSH into 
pv_name  ---> Block device to use as PV
vg_name  ---> Volume Group name 
lv_name ---> Logical Volume name
lv_size  ---> Size of the LV (example: 2G)
lv_type  ---> Filesystem type (`xfs` or `ext4`) 
mount_point  ---> Mount directory 

Script Overview

The script performs the following steps for each row in the CSV:
1. Reads the CSV (skipping the header)
2. Validates required fields and filesystem type
3. SSHs into the target server
4. Checks if the PV exists, otherwise creates it
5. Checks if the VG exists, otherwise creates it
6. Creates the LV if it does not exist
7. Detects existing filesystem signatures
8. Creates the filesystem safely and non-interactively
9. Mounts the filesystem
10. Adds a persistent `/etc/fstab` entry
11. Logs output to `/var/log/lvm_provision_<hostname>.log`

LVM Provisioning Script
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#!/bin/bash
# Author: adminCtrlX
# Purpose: LVM provisioning using server_name from CSV
# Reads one row per LV from CSV_FILE and provisions PV/VG/LV, filesystem,
# mount point and fstab entry on the named server over SSH (idempotent —
# every step checks for existing state before creating anything).

CSV_FILE="/tmp/scripts/linux_lvm.csv"
SUPPORTED_FS="ext4 xfs"   # whitelist for the lv_type CSV column

# Abort early when the input file is missing.
[ ! -f "$CSV_FILE" ] && {
echo "ERROR: CSV file not found: $CSV_FILE"
exit 1
}

# Process every data row (skip the CSV header).  Each row provisions one LV
# on one server.  NB: the remote commands travel on ssh's stdin via the
# heredoc, so the loop's own stdin (the CSV stream) is not consumed by ssh.
tail -n +2 "$CSV_FILE" | while IFS=',' read -r \
server_name pv_name vg_name lv_name lv_size lv_type mount_point
do
echo "========== Processing $server_name =========="

# All seven columns are mandatory.
if [[ -z "$server_name" || -z "$pv_name" || -z "$vg_name" || -z "$lv_name" || \
-z "$lv_size" || -z "$lv_type" || -z "$mount_point" ]]; then
echo "ERROR: Missing mandatory CSV fields. Skipping row."
continue
fi

# Validate size (e.g. 2G) and filesystem type — report the reason instead
# of skipping silently as before.
if [[ ! "$lv_size" =~ ^[0-9]+G$ ]]; then
echo "ERROR: Invalid lv_size '$lv_size' for $lv_name. Skipping row."
continue
fi
if [[ ! " $SUPPORTED_FS " =~ " $lv_type " ]]; then
echo "ERROR: Unsupported lv_type '$lv_type' for $lv_name. Skipping row."
continue
fi

ssh "$server_name" sudo bash <<EOF
LOG_FILE="/var/log/lvm_provision_\$(hostname).log"
exec > >(tee -a "\$LOG_FILE") 2>&1

[ ! -b "$pv_name" ] && { echo "Block device not found"; exit 1; }
pvs "$pv_name" &>/dev/null || pvcreate "$pv_name"
vgs "$vg_name" &>/dev/null || vgcreate "$vg_name" "$pv_name"

LV_PATH="/dev/$vg_name/$lv_name"
lvs "\$LV_PATH" &>/dev/null || lvcreate -L "$lv_size" -n "$lv_name" "$vg_name"

FS_EXIST=\$(lsblk -no FSTYPE "\$LV_PATH")
if [ -z "\$FS_EXIST" ]; then
wipefs -a "\$LV_PATH"
# Force flags differ per filesystem: xfs uses -f, ext4 uses -F.
# (The old unconditional "-f" made mkfs.ext4 fail.)
case "$lv_type" in
xfs)  mkfs.xfs -f "\$LV_PATH" ;;
ext4) mkfs.ext4 -F "\$LV_PATH" ;;
esac
fi

mkdir -p "$mount_point"
mountpoint -q "$mount_point" || mount "\$LV_PATH" "$mount_point"
# Anchor the fstab match so /dev/datavg/lv_app cannot also match lv_app2.
grep -q "^\$LV_PATH " /etc/fstab || \
echo "\$LV_PATH $mount_point $lv_type defaults 0 0" >> /etc/fstab
echo "SUCCESS on \$(hostname)"
EOF
done
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Logging
Each server logs execution details to:
/var/log/lvm_provision_<hostname>.log
This makes troubleshooting and auditing straightforward.

Benefits of This Approach
  • CSV-driven infrastructure
  • No interactive prompts
  • Safe to re-run
  • Scales to dozens of servers
  • Clear separation of data and logic
  • Ideal for automation and CI/CD pipelines
Final Thoughts
Using a CSV-driven LVM provisioning script gives you consistency, speed, and reliability when managing storage across multiple Linux systems. This method is especially useful in environments where infrastructure must be provisioned repeatedly with minimal human error.
If you’d like to extend this further, consider adding:
  • DRY-RUN mode
  • Filesystem resize support
  • Wipe control per volume
  • Parallel SSH execution

Automating User Creation on Multiple Linux Servers Using Bash and CSV

Managing user accounts across multiple Linux servers can quickly become repetitive and error-prone. In this post, we walk through a Bash-based automation approach that creates users on multiple remote servers using a CSV input file.
This solution is ideal for system administrators who want a simple, SSH-based alternative to heavier tools while still maintaining consistency and control.

Overview
The script:
  • Reads user details from a CSV file
  • Creates primary groups with specific GIDs
  • Adds users to up to three secondary groups
  • Creates users only if they do not already exist
  • Executes the process on multiple remote servers passed as arguments
CSV File Format
The script expects the following CSV structure:
user_id,
user_pri_group,
user_pri_group_id,
user_sec_group1,
user_sec_group2,
user_sec_group3,
user_home_dir,
user_shell,
user_password,
user_gecos_info

Sample CSV Input
# user_id,user_pri_group,user_pri_group_id,user_sec_group1,user_sec_group2,user_sec_group3,user_home_dir,user_shell,user_password,user_gecos_info

tasleem,tasleem,1005,apps,dba,sysadm,/home/tasleem,/bin/bash,root123,Tasleem Ahmed Khan
hamzah,hamzah,1006,apps,dba,sysadm,/home/hamzah,/bin/bash,root123,Hamzah Ali Khan

Primary Group Enforcement
The primary group name and GID must be present. If missing, user creation is skipped.

Secondary Group Handling
Up to three secondary groups are supported.
If a group does not exist, it is created automatically.

Idempotent Execution

If a user already exists, the script safely skips creation.

Multi-Server Support
The same CSV file is applied to all servers passed on the command line.

Bash Script
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#!/bin/bash
#
# Author : adminCtrlX
# Description : Automating User Creation on Multiple Linux Servers Using Bash and CSV
# Usage : ./create_users_remote.sh host1 host2 host3
#

# CSV with one user per row; see the header line for the column order.
CSV_FILE="/tmp/scripts/users.csv"

# At least one target host is required on the command line.
if [ $# -lt 1 ]; then
echo "Usage: $0 host1 host2 ... hostN"
exit 1
fi

# Fail fast when the input file is missing (same guard as the companion
# LVM provisioning script) instead of letting tail(1) error mid-run.
if [ ! -f "$CSV_FILE" ]; then
echo "ERROR: CSV file not found: $CSV_FILE"
exit 1
fi

# NB: plain string of hosts; hostnames must not contain whitespace.
HOSTS="$@"

# Iterate users (skipping the CSV header); for each user, run the creation
# logic on every target server.  The heredoc delimiter is unquoted, so CSV
# fields expand locally before transmission, while \$-escaped variables are
# evaluated on the remote host.
# NOTE(review): if a group already exists with a different GID than
# user_pri_group_id, the row proceeds with the existing GID — confirm that
# is acceptable.
# NOTE(review): the plain-text password travels over ssh stdin into
# chpasswd; see the security notes below about hashed passwords.
tail -n +2 "$CSV_FILE" | while IFS=',' read -r \
user_id pri_group pri_gid sec_grp1 sec_grp2 sec_grp3 home_dir shell password gecos
do
# Target servers are the script's positional arguments.
for server in $HOSTS; do
echo "Processing user $user_id on $server..."

ssh "$server" sudo bash <<EOF

if [ -z "$pri_group" ] || [ -z "$pri_gid" ]; then
echo "Primary group or GID missing for $user_id. Skipping."
exit 0
fi

if ! getent group "$pri_group" >/dev/null; then
groupadd -g "$pri_gid" "$pri_group"
fi

SEC_GROUPS=""
for grp in "$sec_grp1" "$sec_grp2" "$sec_grp3"; do
if [ -n "\$grp" ]; then
getent group "\$grp" >/dev/null || groupadd "\$grp"
SEC_GROUPS="\$SEC_GROUPS,\$grp"
fi
done

SEC_GROUPS="\${SEC_GROUPS#,}"

if ! id "$user_id" >/dev/null 2>&1; then
useradd \
-g "$pri_group" \
\${SEC_GROUPS:+-G "\$SEC_GROUPS"} \
-d "$home_dir" \
-s "$shell" \
-c "$gecos" \
-m "$user_id"

echo "$user_id:$password" | chpasswd
echo "User $user_id created successfully on $server"
else
echo "User $user_id already exists on $server"
fi
EOF
done
done
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

How to Run the Script
1. Make the script executable:
# chmod +x create_users_remote.sh

2. Execute it by passing the target servers:
# ./create_users_remote.sh server1 server2 server3

Security Considerations
Storing plain-text passwords in CSV files is not recommended for production environments. Consider:
  • Using hashed passwords
  • Forcing password change on first login
  • Using SSH keys instead of passwords
Conclusion
This Bash-based approach provides a lightweight yet effective way to manage users across multiple Linux servers. It is easy to understand, easy to modify, and suitable for small to medium-scale environments where full configuration management tools may not be required.

Installing Atlassian Jira Software 8.19.0 on RHEL 10

Atlassian Jira is a widely used issue and project tracking tool. In this post, we’ll walk through the installation of Jira Software 8.19.0 on Red Hat Enterprise Linux (RHEL) 10 using the official Linux installer (.bin). This guide assumes you are logged in as the root user.

Prerequisites
Before starting, ensure:
  • RHEL 10 is installed and running
  • You have root or sudo privileges
  • Required ports are free: 8080 (HTTP) & 8005 (Control/RMI)
  • At least 2 GB RAM (4 GB recommended)
Step 1: Download the Jira Installer
Download the Jira Software binary installer from Atlassian’s official site (https://www.atlassian.com/software/jira/download/data-center) and place it in your working directory (for example, /tmp).
Example file:
atlassian-jira-software-8.19.0-x64.bin

Step 2: Make the Installer Executable
Change the file permissions to make the installer executable:
# chmod 775 atlassian-jira-software-8.19.0-x64.bin
Verify permissions:
# ll
Output:
-rwxrwxr-x 1 root root 462430556 Sep 15 2021 atlassian-jira-software-8.19.0-x64.bin

Step 3: Run the Jira Installer
Start the installation:
# ./atlassian-jira-software-8.19.0-x64.bin
The installer will unpack its bundled JRE and launch the setup wizard.

Step 4: Installer Configuration Options
During installation, select the following options:
Installation Type
Choose:
Custom Install (recommended for advanced users)
Installation Directory
/opt/atlassian/jira
Jira Home (Data Directory)
/var/atlassian/application-data/jira
Ports Configuration
Use default ports:
HTTP Port: 8080
Control (RMI) Port: 8005

Run Jira as a Service
Choose:
Yes
This ensures Jira starts automatically on system reboot.

Step 5: Confirm Installation Settings
Summary displayed by the installer:
Installation Directory: /opt/atlassian/jira
Home Directory: /var/atlassian/application-data/jira
HTTP Port: 8080
RMI Port: 8005
Install as Service: Yes
Proceed by selecting Install.

Step 6: Start Jira Software
Once the installation completes, choose:
Start Jira Software 8.19.0 now? Yes
The installer will start Jira and register it as a system service.

Step 7: Access Jira Web Interface
After startup, Jira becomes accessible at:
http://localhost:8080
When you open this URL in your browser, you will see the initial setup screen with two options:

Choose the option that best fits your environment, then sign in with your Atlassian account (Google sign-in is also supported).

Next, the setup wizard displays the server's IP address (for example, 192.168.10.120); confirm it to continue.
Then set up the administrator account and click Next.
Continue with English as the interface language.
The dashboard then appears, where you can create a project and import issues.
Conclusion
You have successfully installed Atlassian Jira Software 8.19.0 on RHEL 10 using the official Linux installer. Jira is now running as a service and ready for initial configuration via the web interface.

This setup is ideal for:
Agile project management
Issue tracking
Enterprise team collaboration