From ebe2ddc8e4c0ba925da4f1b4025abfd525e74c7d Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Wed, 17 May 2023 12:28:52 +0100 Subject: [PATCH 01/25] feat: alpha alter storage --- .../host_provision/build_execution.tf | 2 - .../host_provision/build_filesystem_setup.tf | 601 ------------------ .../host_provision/build_os_prepare.tf | 13 +- .../host_provision/build_os_user_root.tf | 1 - aws_ec2_instance/host_provision/host.tf | 92 +-- .../host_provision/host_block_storage.tf | 219 +------ .../host_provision/module_variables.tf | 146 +---- 7 files changed, 23 insertions(+), 1051 deletions(-) delete mode 100644 aws_ec2_instance/host_provision/build_filesystem_setup.tf diff --git a/aws_ec2_instance/host_provision/build_execution.tf b/aws_ec2_instance/host_provision/build_execution.tf index bb496d2..a579ca6 100644 --- a/aws_ec2_instance/host_provision/build_execution.tf +++ b/aws_ec2_instance/host_provision/build_execution.tf @@ -6,7 +6,6 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ null_resource.build_script_os_user_root, null_resource.dns_resolv_files, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare ] @@ -43,7 +42,6 @@ resource "null_resource" "execute_os_scripts" { "echo 'Show HOME directory for reference Shell scripts were transferred'", "ls -lha $HOME", "/home/ec2-user/terraform_dig.sh", - "/home/ec2-user/terraform_fs_init.sh", "/home/ec2-user/terraform_os_prep.sh" ] } diff --git a/aws_ec2_instance/host_provision/build_filesystem_setup.tf b/aws_ec2_instance/host_provision/build_filesystem_setup.tf deleted file mode 100644 index 324000a..0000000 --- a/aws_ec2_instance/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,601 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - depends_on = [ - aws_volume_attachment.volume_attachment_hana_data, - aws_volume_attachment.volume_attachment_hana_data_custom, - aws_volume_attachment.volume_attachment_hana_log, - aws_volume_attachment.volume_attachment_hana_log_custom, - aws_volume_attachment.volume_attachment_hana_shared, - aws_volume_attachment.volume_attachment_hana_shared_custom, - aws_volume_attachment.volume_attachment_usr_sap, - aws_volume_attachment.volume_attachment_sapmnt, - aws_volume_attachment.volume_attachment_swap, - aws_volume_attachment.volume_attachment_software, - aws_volume_attachment.volume_attachment_anydb, - aws_volume_attachment.volume_attachment_anydb_custom - ] - - connection { - type = "ssh" - user = "ec2-user" - host = aws_instance.host.private_ip - private_key = var.module_var_host_ssh_private_key - bastion_host = var.module_var_bastion_ip - bastion_port = var.module_var_bastion_ssh_port - bastion_user = var.module_var_bastion_user - bastion_private_key = var.module_var_bastion_private_ssh_key - #bastion_host_key = tls_private_key.bastion_ssh.public_key_openssh - } - - # Path must already exist and must not use Bash shell special variable, e.g. 
cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. - # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change - # - # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset - # - # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks - #### - - echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)" - vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n') - echo "" - - ####### - # Create expandable LVM Logical Volume, using single or multiple physical disk volumes - # Default is 64K for the stripe size (aka. 
block size) - # - # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined - # - # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization - # IMPORTANT: Correct setting of this stripe size has impact on performance of OS and Applications read/write - ####### - - # Count number of LVM Physical Volumes in the LVM Volume Group - count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w) - - # Create LVM Logical Volume - # Stripe across all LVM Physical Volumes available in the LVM Volume Group - echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size extent size (block size)" - lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name" - echo "" - - - ####### - # Create File System formatting for the LVM Logical Volume - # Filesystem is either XFS or EXT4 - ####### - - echo "Create File System formatting for the LVM Logical Volume" - mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name" - echo "" - - - ####### - # Permenant mount point - ####### - - # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier. - # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems. - - # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure - # that the network link is ready before the operating system attempts to mount the disk. 
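For illustration of the two notes above, a hypothetical fstab entry combining both recommendations (the UUID value and mount point are invented for the example, not taken from this module):

    # Mount by stable UUID path rather than /dev/sdX; _netdev defers the mount
    # until the network link is up (relevant for iSCSI-attached devices)
    UUID=0e9f3a2b-1c4d-4e5f-8a9b-0c1d2e3f4a5b /hana/data xfs defaults,noatime,_netdev 0 0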
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
- # With only 4 KiB Page Size, only 2 in-memory log buffers are available so increase to each buffer's size (default 32kc) may increase performance - mount_options="defaults,noatime" - #mount_options="defaults,logbsize=256k" - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]] - then - echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB" - parted --script /dev/$disk_id \ - mklabel gpt \ - mkpart primary $filesystem_format 1MiB 100% \ - name 1 $physical_partition_name - echo "Format Disk Partition with File System, with block size $block_size_definition" - mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id - echo "Write Mount Points to Linux File System Table" - PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue) - echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Swap file or partition -############################################# - -function create_swap_file() { - - echo "Create swapfile" - - swap_gb="$1" - swap_bs="128" - - swap_calc_bs=$swap_bs"M" - swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))" - dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count - chmod 600 /swapfile - mkswap /swapfile - swapon /swapfile - echo '/swapfile swap swap defaults 0 0' >> /etc/fstab - swapon --show - free -h - -} - - -function create_swap_partition() { - - find_swap_partition_by_size="$1" - - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - 
###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - echo 'Install jq' - if [ ! -f /usr/local/bin/jq ]; then curl -L 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - 
lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_nfs_boolean_sapmnt} == "false" ]] && [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - elif [[ ${var.module_var_nfs_boolean_sapmnt} == "true" ]] - then - # Establish AWS EFS Mount Target DNS Name (the AWS EFS network interface must be added to the correct Security Groups for the hosts) - aws_efs_mount_fqdn_sapmnt='${var.module_var_nfs_boolean_sapmnt ? var.module_var_nfs_fqdn_sapmnt : "null"}' - - # AWS recommend OS mount of the AWS EFS Mount Target via the DNS FQDN, which resolves to the IP Address of the AWS EFS Mount Target in the same AWS Availability Zone as the AWS EC2 Virtual Server. 
- # Usually to increase network latency performance for SAP NetWeaver read/write operations, IP Addresses should be used - # However, the NFS protocol performs DNS lookup at mount time, and stores into the local host DNS cache - based on the DNS TTL defined by the AWS DNS Name Server - # As this activity is infrequent, it should not impact performance to set to the FQDN of the AWS EFS Mount Target - # It is mandatory that the AWS VPC has enabled DNS Support and DNS Hostname Support, otherwise resolution to the Private IP Address will fail - - # Install NFS - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install nfs-utils ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm nfs-client ; fi - - # Mount AWS EFS via DNS FQDN - echo "Mounting NFS for /sapmnt to AWS EFS Mount Target DNS Name: $aws_efs_mount_fqdn_sapmnt" - #sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport $aws_efs_mount_fqdn_sapmnt:/ /sapmnt - echo "# fstab entries for NFS" >> /etc/fstab - echo "$aws_efs_mount_fqdn_sapmnt:/ /sapmnt nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0" >> /etc/fstab - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/aws_ec2_instance/host_provision/build_os_prepare.tf b/aws_ec2_instance/host_provision/build_os_prepare.tf index a3c2535..1ea2290 100644 --- a/aws_ec2_instance/host_provision/build_os_prepare.tf +++ b/aws_ec2_instance/host_provision/build_os_prepare.tf @@ -2,18 +2,7 @@ resource "null_resource" "build_script_os_prepare" { depends_on = [ - aws_volume_attachment.volume_attachment_hana_data, - aws_volume_attachment.volume_attachment_hana_data_custom, - aws_volume_attachment.volume_attachment_hana_log, - aws_volume_attachment.volume_attachment_hana_log_custom, - aws_volume_attachment.volume_attachment_hana_shared, - aws_volume_attachment.volume_attachment_hana_shared_custom, - aws_volume_attachment.volume_attachment_usr_sap, - aws_volume_attachment.volume_attachment_sapmnt, - aws_volume_attachment.volume_attachment_swap, - aws_volume_attachment.volume_attachment_software, - aws_volume_attachment.volume_attachment_anydb, - aws_volume_attachment.volume_attachment_anydb_custom + aws_volume_attachment.block_volume_attachment ] connection { diff --git a/aws_ec2_instance/host_provision/build_os_user_root.tf b/aws_ec2_instance/host_provision/build_os_user_root.tf index 1f7d526..f800400 100644 --- a/aws_ec2_instance/host_provision/build_os_user_root.tf +++ b/aws_ec2_instance/host_provision/build_os_user_root.tf @@ -3,7 +3,6 @@ resource "null_resource" "build_script_os_user_root" { depends_on = [ null_resource.dns_resolv_files, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare ] diff --git a/aws_ec2_instance/host_provision/host.tf b/aws_ec2_instance/host_provision/host.tf index 255efce..d7a29b4 100644 --- a/aws_ec2_instance/host_provision/host.tf +++ b/aws_ec2_instance/host_provision/host.tf @@ -54,93 +54,11 @@ resource "aws_instance" "host" { } - # Attach EBS block disk volumes to host - -resource "aws_volume_attachment" 
"volume_attachment_hana_data" { - count = length(aws_ebs_volume.block_volume_hana_data_voltype.*.id) - device_name = "/dev/sd${element(["c", "d", "e", "f"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_data_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_hana_data_custom" { - count = length(aws_ebs_volume.block_volume_hana_data_custom.*.id) - device_name = "/dev/sd${element(["c", "d", "e", "f"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_data_custom.*.id, count.index) -} - - -resource "aws_volume_attachment" "volume_attachment_hana_log" { - count = length(aws_ebs_volume.block_volume_hana_log_voltype.*.id) - device_name = "/dev/sd${element(["g", "h", "i", "j"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_log_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_hana_log_custom" { - count = length(aws_ebs_volume.block_volume_hana_log_custom.*.id) - device_name = "/dev/sd${element(["g", "h", "i", "j"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_log_custom.*.id, count.index) -} - - -resource "aws_volume_attachment" "volume_attachment_hana_shared" { - count = length(aws_ebs_volume.block_volume_hana_shared_voltype.*.id) - device_name = "/dev/sd${element(["k", "l", "m", "n"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_shared_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_hana_shared_custom" { - count = length(aws_ebs_volume.block_volume_hana_shared_custom.*.id) - device_name = "/dev/sd${element(["k", "l", "m", "n"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_hana_shared_custom.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_usr_sap" { - count = length(aws_ebs_volume.block_volume_usr_sap_voltype.*.id) - device_name = "/dev/sd${element(["o", "p", "q", "r"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_usr_sap_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_sapmnt" { - count = var.module_var_nfs_boolean_sapmnt ? 0 : length(aws_ebs_volume.block_volume_sapmnt_voltype.*.id) - device_name = "/dev/sd${element(["s", "t", "u", "v"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_sapmnt_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_swap" { - count = length(aws_ebs_volume.block_volume_swap_voltype.*.id) - device_name = "/dev/sd${element(["w", "x", "y", "z"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_swap_voltype.*.id, count.index) -} - -# AWS EBS does not accept /dev/sdaa. Only 1 software drive is expected therefore put to front of Linux device naming i.e. 
/dev/sdb -resource "aws_volume_attachment" "volume_attachment_software" { - count = length(aws_ebs_volume.block_volume_software_voltype.*.id) - device_name = "/dev/sdb" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_software_voltype.*.id, count.index) -} - - -resource "aws_volume_attachment" "volume_attachment_anydb" { - count = length(aws_ebs_volume.block_volume_anydb_voltype.*.id) - device_name = "/dev/sd${element(["aa", "ab", "ac", "ad"], count.index)}" - instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_anydb_voltype.*.id, count.index) -} - -resource "aws_volume_attachment" "volume_attachment_anydb_custom" { - count = length(aws_ebs_volume.block_volume_anydb_custom.*.id) - device_name = "/dev/sd${element(["aa", "ab", "ac", "ad"], count.index)}" +# AWS EBS does not accept /dev/sdaa. +resource "aws_volume_attachment" "block_volume_attachment" { + count = length(aws_ebs_volume.block_volume_provision.*.id) + device_name = "/dev/sd${element(["d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"], count.index)}" instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_anydb_custom.*.id, count.index) + volume_id = element(aws_ebs_volume.block_volume_provision.*.id, count.index) } diff --git a/aws_ec2_instance/host_provision/host_block_storage.tf b/aws_ec2_instance/host_provision/host_block_storage.tf index fb7cb16..878a55a 100644 --- a/aws_ec2_instance/host_provision/host_block_storage.tf +++ b/aws_ec2_instance/host_provision/host_block_storage.tf @@ -1,15 +1,16 @@ # Create Block Storage -resource "aws_ebs_volume" "block_volume_hana_data_voltype" { - count = var.module_var_disk_volume_type_hana_data != "custom" ? var.module_var_disk_volume_count_hana_data : 0 +resource "aws_ebs_volume" "block_volume_provision" { + count = length(var.module_var_storage_definition) availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_data - size = var.module_var_disk_volume_capacity_hana_data + type = try(var.module_var_storage_definition[count.index]["disk_type"], "gp3") + size = var.module_var_storage_definition[count.index]["disk_size"] + iops = try(var.module_var_storage_definition[count.index]["disk_iops"], null) tags = { - Name = "${var.module_var_host_name}-volume-hana-data-${count.index}" + Name = "${var.module_var_host_name}-vol-${var.module_var_storage_definition[count.index].name}" } # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP @@ -17,213 +18,5 @@ resource "aws_ebs_volume" "block_volume_hana_data_voltype" { create = "30m" delete = "30m" } -} - -resource "aws_ebs_volume" "block_volume_hana_data_custom" { - count = var.module_var_disk_volume_type_hana_data == "custom" ? var.module_var_disk_volume_count_hana_data : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_data - size = var.module_var_disk_volume_capacity_hana_data - iops = var.module_var_disk_volume_iops_hana_data - - tags = { - Name = "${var.module_var_host_name}-volume-hana-data-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "aws_ebs_volume" "block_volume_hana_log_voltype" { - count = var.module_var_disk_volume_type_hana_log != "custom" ? 
var.module_var_disk_volume_count_hana_log : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_log - size = var.module_var_disk_volume_capacity_hana_log - - tags = { - Name = "${var.module_var_host_name}-volume-hana-log-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_hana_log_custom" { - count = var.module_var_disk_volume_type_hana_log == "custom" ? var.module_var_disk_volume_count_hana_log : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_log - size = var.module_var_disk_volume_capacity_hana_log - iops = var.module_var_disk_volume_iops_hana_log - - tags = { - Name = "${var.module_var_host_name}-volume-hana-log-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "aws_ebs_volume" "block_volume_hana_shared_voltype" { - count = var.module_var_disk_volume_type_hana_shared != "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_shared - size = var.module_var_disk_volume_capacity_hana_shared - - tags = { - Name = "${var.module_var_host_name}-volume-hana-shared-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_hana_shared_custom" { - count = var.module_var_disk_volume_type_hana_shared == "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_hana_shared - size = var.module_var_disk_volume_capacity_hana_shared - iops = var.module_var_disk_volume_iops_hana_shared - - tags = { - Name = "${var.module_var_host_name}-volume-hana-shared-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "aws_ebs_volume" "block_volume_anydb_voltype" { - count = var.module_var_disk_volume_type_anydb != "custom" ? var.module_var_disk_volume_count_anydb : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_anydb - size = var.module_var_disk_volume_capacity_anydb - tags = { - Name = "${var.module_var_host_name}-volume-anydb-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_anydb_custom" { - count = var.module_var_disk_volume_type_anydb == "custom" ? 
var.module_var_disk_volume_count_anydb : 0 - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_anydb - size = var.module_var_disk_volume_capacity_anydb - iops = var.module_var_disk_volume_iops_anydb - - tags = { - Name = "${var.module_var_host_name}-volume-anydb-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "aws_ebs_volume" "block_volume_usr_sap_voltype" { - count = var.module_var_disk_volume_count_usr_sap - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_usr_sap - size = var.module_var_disk_volume_capacity_usr_sap - - tags = { - Name = "${var.module_var_host_name}-volume-usr-sap-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_sapmnt_voltype" { - count = var.module_var_nfs_boolean_sapmnt ? 0 : var.module_var_disk_volume_count_sapmnt - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_sapmnt - size = var.module_var_disk_volume_capacity_sapmnt - - tags = { - Name = "${var.module_var_host_name}-volume-sapmnt-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_swap_voltype" { - count = var.module_var_disk_volume_count_swap - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_swap - size = var.module_var_disk_volume_capacity_swap - - tags = { - Name = "${var.module_var_host_name}-volume-swap-${count.index}" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "aws_ebs_volume" "block_volume_software_voltype" { - - availability_zone = local.target_vpc_availability_zone - type = var.module_var_disk_volume_type_software - size = var.module_var_disk_volume_capacity_software - - tags = { - Name = "${var.module_var_host_name}-volume-software" - } - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } } diff --git a/aws_ec2_instance/host_provision/module_variables.tf b/aws_ec2_instance/host_provision/module_variables.tf index 7a90964..158baa5 100644 --- a/aws_ec2_instance/host_provision/module_variables.tf +++ b/aws_ec2_instance/host_provision/module_variables.tf @@ -19,150 +19,26 @@ variable "module_var_bastion_private_ssh_key" {} variable "module_var_dns_zone_id" {} variable "module_var_dns_root_domain_name" {} -variable "module_var_disk_volume_type_hana_data" {} -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_disk_volume_iops_hana_data" { - default = null -} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable 
"module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_type_hana_log" {} -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_disk_volume_iops_hana_log" { - default = null -} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_type_hana_shared" {} -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_disk_volume_iops_hana_shared" { - default = null -} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} +variable "module_var_dns_nameserver_list" {} -variable "module_var_disk_volume_type_anydb" {} -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { +variable "module_var_disable_ip_anti_spoofing" { default = false } -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} +variable "module_var_filesystem_hana_data" {} -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_type_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} +variable "module_var_filesystem_hana_log" {} +variable "module_var_filesystem_hana_shared" {} -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_type_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} -variable "module_var_nfs_boolean_sapmnt" {} -variable "module_var_nfs_fqdn_sapmnt" {} - +variable "module_var_filesystem_anydb" {} -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_type_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} +variable "module_var_filesystem_usr_sap" {} +variable 
"module_var_filesystem_sapmnt" {} -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 525 -} -variable "module_var_disk_volume_type_software" { - default = "gp3" -} +variable "module_var_filesystem_swap" {} -variable "module_var_dns_nameserver_list" {} +variable "module_var_filesystem_software" {} -variable "module_var_disable_ip_anti_spoofing" { - default = false -} +variable "module_var_storage_definition" {} From e3fc83164d46506dd11a7d9e1843e119c5497522 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Wed, 17 May 2023 14:31:44 +0100 Subject: [PATCH 02/25] fix: redundant vars --- .../host_provision/module_variables.tf | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/aws_ec2_instance/host_provision/module_variables.tf b/aws_ec2_instance/host_provision/module_variables.tf index 158baa5..89c1dca 100644 --- a/aws_ec2_instance/host_provision/module_variables.tf +++ b/aws_ec2_instance/host_provision/module_variables.tf @@ -25,20 +25,4 @@ variable "module_var_disable_ip_anti_spoofing" { default = false } -variable "module_var_filesystem_hana_data" {} - -variable "module_var_filesystem_hana_log" {} - -variable "module_var_filesystem_hana_shared" {} - -variable "module_var_filesystem_anydb" {} - -variable "module_var_filesystem_usr_sap" {} - -variable "module_var_filesystem_sapmnt" {} - -variable "module_var_filesystem_swap" {} - -variable "module_var_filesystem_software" {} - variable "module_var_storage_definition" {} From 7528fb195cf04ee85e3a2555cd58d314d47a8e34 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Thu, 18 May 2023 17:49:33 +0100 Subject: [PATCH 03/25] fix: az nfs if boolean false --- msazure_vm/host_nfs/host_file_storage_private_endpoint.tf | 2 ++ msazure_vm/host_nfs/module_outputs.tf | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf b/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf index ccb81ed..f7a37cb 100644 --- a/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf +++ b/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf @@ -1,6 +1,7 @@ # Create Azure Storage Account Private Endpoint connection resource "azurerm_private_endpoint" "endpoint" { + count = var.module_var_nfs_boolean_sapmnt ? 1 : 0 name = "${var.module_var_resource_prefix}stgacc${random_string.random_suffix.result}-private-endpoint" resource_group_name = var.module_var_az_resource_group_name location = var.module_var_az_location_region @@ -24,6 +25,7 @@ resource "azurerm_private_endpoint" "endpoint" { # Create DNS A Records # This is alternative to creating the default private endpoint with a new Private DNS Zone 'privatelink.file.core.windows.net' resource "azurerm_private_dns_a_record" "dns_a_record_short" { + count = var.module_var_nfs_boolean_sapmnt ? 
1 : 0 name = "${var.module_var_resource_prefix}stgacc${random_string.random_suffix.result}" resource_group_name = var.module_var_az_resource_group_name zone_name = var.module_var_dns_zone_name diff --git a/msazure_vm/host_nfs/module_outputs.tf b/msazure_vm/host_nfs/module_outputs.tf index 6bccb9c..ab09766 100644 --- a/msazure_vm/host_nfs/module_outputs.tf +++ b/msazure_vm/host_nfs/module_outputs.tf @@ -12,5 +12,5 @@ #} output "output_nfs_fqdn" { - value = "${azurerm_private_dns_a_record.dns_a_record_short.fqdn}:/${azapi_resource.storage_account_sap[0].name}/${azapi_resource.file_storage_sapmnt[0].name}" + value = try("${azurerm_private_dns_a_record.dns_a_record_short.fqdn}:/${azapi_resource.storage_account_sap[0].name}/${azapi_resource.file_storage_sapmnt[0].name}","") } From b3e1561ecb61453799235449ffcdaa515be55f2b Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Thu, 18 May 2023 17:53:11 +0100 Subject: [PATCH 04/25] fix: error in count --- msazure_vm/host_nfs/host_file_storage_private_endpoint.tf | 2 +- msazure_vm/host_nfs/module_outputs.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf b/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf index f7a37cb..e3ff1a1 100644 --- a/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf +++ b/msazure_vm/host_nfs/host_file_storage_private_endpoint.tf @@ -30,5 +30,5 @@ resource "azurerm_private_dns_a_record" "dns_a_record_short" { resource_group_name = var.module_var_az_resource_group_name zone_name = var.module_var_dns_zone_name ttl = 1000 - records = [azurerm_private_endpoint.endpoint.private_service_connection.0.private_ip_address] + records = [azurerm_private_endpoint.endpoint[0].private_service_connection.0.private_ip_address] } diff --git a/msazure_vm/host_nfs/module_outputs.tf b/msazure_vm/host_nfs/module_outputs.tf index ab09766..a57bbe2 100644 --- a/msazure_vm/host_nfs/module_outputs.tf +++ b/msazure_vm/host_nfs/module_outputs.tf @@ -12,5 +12,5 @@ #} output "output_nfs_fqdn" { - value = try("${azurerm_private_dns_a_record.dns_a_record_short.fqdn}:/${azapi_resource.storage_account_sap[0].name}/${azapi_resource.file_storage_sapmnt[0].name}","") + value = try("${azurerm_private_dns_a_record.dns_a_record_short[0].fqdn}:/${azapi_resource.storage_account_sap[0].name}/${azapi_resource.file_storage_sapmnt[0].name}","") } From c5b133f3f8afd66140f232806bce65b5b924d71f Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Fri, 19 May 2023 21:03:59 +0100 Subject: [PATCH 05/25] fix: disable ip spoofing --- msazure_vm/host_provision/host.tf | 2 ++ msazure_vm/host_provision/module_variables.tf | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/msazure_vm/host_provision/host.tf b/msazure_vm/host_provision/host.tf index 9d0c995..c243e7f 100644 --- a/msazure_vm/host_provision/host.tf +++ b/msazure_vm/host_provision/host.tf @@ -6,6 +6,8 @@ resource "azurerm_network_interface" "host_nic0" { resource_group_name = local.target_resource_group_name location = var.module_var_az_location_region + enable_ip_forwarding = var.module_var_disable_ip_anti_spoofing // When disable the Anti IP Spoofing = true, then Enable IP Forwarding = true + ip_configuration { primary = "true" name = "${var.module_var_host_name}-nic-0-link" diff --git a/msazure_vm/host_provision/module_variables.tf b/msazure_vm/host_provision/module_variables.tf index 099b1e0..b091e5d 100644 --- 
a/msazure_vm/host_provision/module_variables.tf +++ b/msazure_vm/host_provision/module_variables.tf @@ -184,3 +184,7 @@ variable "module_var_disk_volume_capacity_software" { variable "module_var_dns_zone_name" {} variable "module_var_dns_root_domain_name" {} + +variable "module_var_disable_ip_anti_spoofing" { + default = false +} From bd1ef1d8269a051910421697826b8b0e06645b7c Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Sat, 20 May 2023 09:22:03 +0100 Subject: [PATCH 06/25] fix: updates for storage def structure --- aws_ec2_instance/host_provision/host.tf | 8 +- .../host_provision/host_block_storage.tf | 21 +- gcp_ce_vm/host_provision/build_execution.tf | 2 - .../host_provision/build_filesystem_setup.tf | 596 ---------------- gcp_ce_vm/host_provision/build_os_prepare.tf | 13 +- .../host_provision/build_os_user_root.tf | 1 - gcp_ce_vm/host_provision/host.tf | 82 +-- .../host_provision/host_block_storage.tf | 152 +--- gcp_ce_vm/host_provision/module_variables.tf | 140 +--- msazure_vm/host_provision/build_execution.tf | 10 +- .../host_provision/build_filesystem_setup.tf | 656 ------------------ msazure_vm/host_provision/build_os_prepare.tf | 4 + msazure_vm/host_provision/host.tf | 72 +- .../host_provision/host_block_storage.tf | 164 +---- msazure_vm/host_provision/module_variables.tf | 141 +--- 15 files changed, 72 insertions(+), 1990 deletions(-) delete mode 100644 gcp_ce_vm/host_provision/build_filesystem_setup.tf delete mode 100644 msazure_vm/host_provision/build_filesystem_setup.tf diff --git a/aws_ec2_instance/host_provision/host.tf b/aws_ec2_instance/host_provision/host.tf index d7a29b4..d2c06fc 100644 --- a/aws_ec2_instance/host_provision/host.tf +++ b/aws_ec2_instance/host_provision/host.tf @@ -57,8 +57,10 @@ resource "aws_instance" "host" { # Attach EBS block disk volumes to host # AWS EBS does not accept /dev/sdaa. 
resource "aws_volume_attachment" "block_volume_attachment" { - count = length(aws_ebs_volume.block_volume_provision.*.id) - device_name = "/dev/sd${element(["d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"], count.index)}" +# count = length(aws_ebs_volume.block_volume_provision.*.id) + for_each = aws_ebs_volume.block_volume_provision + + device_name = "/dev/sd${element(["d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"], index(keys(aws_ebs_volume.block_volume_provision),each.key))}" instance_id = aws_instance.host.id - volume_id = element(aws_ebs_volume.block_volume_provision.*.id, count.index) + volume_id = each.value.id } diff --git a/aws_ec2_instance/host_provision/host_block_storage.tf b/aws_ec2_instance/host_provision/host_block_storage.tf index 878a55a..3518c08 100644 --- a/aws_ec2_instance/host_provision/host_block_storage.tf +++ b/aws_ec2_instance/host_provision/host_block_storage.tf @@ -2,15 +2,26 @@ # Create Block Storage resource "aws_ebs_volume" "block_volume_provision" { - count = length(var.module_var_storage_definition) +# count = sum([ for storage_item in var.module_var_storage_definition: try(storage_item.disk_count,1) ]) + + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk + } availability_zone = local.target_vpc_availability_zone - type = try(var.module_var_storage_definition[count.index]["disk_type"], "gp3") - size = var.module_var_storage_definition[count.index]["disk_size"] - iops = try(var.module_var_storage_definition[count.index]["disk_iops"], null) + type = each.value.disk_type + size = each.value.disk_size + iops = each.value.disk_iops tags = { - Name = "${var.module_var_host_name}-vol-${var.module_var_storage_definition[count.index].name}" + Name = "${var.module_var_host_name}-vol-${each.value.name}" } # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP diff --git a/gcp_ce_vm/host_provision/build_execution.tf b/gcp_ce_vm/host_provision/build_execution.tf index 8558517..2f01073 100644 --- a/gcp_ce_vm/host_provision/build_execution.tf +++ b/gcp_ce_vm/host_provision/build_execution.tf @@ -6,7 +6,6 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ null_resource.build_script_os_user_root, null_resource.dns_resolv_files, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare ] @@ -40,7 +39,6 @@ resource "null_resource" "execute_os_scripts" { "echo 'Show HOME directory for reference Shell scripts were transferred'", "ls -lha $HOME", "/home/admin/terraform_dig.sh", - "/home/admin/terraform_fs_init.sh", "/home/admin/terraform_os_prep.sh" ] } diff --git a/gcp_ce_vm/host_provision/build_filesystem_setup.tf b/gcp_ce_vm/host_provision/build_filesystem_setup.tf deleted file mode 100644 index b0017d7..0000000 --- a/gcp_ce_vm/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,596 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - depends_on = [ - google_compute_attached_disk.volume_attachment_hana_data, - google_compute_attached_disk.volume_attachment_hana_data_custom, - 
google_compute_attached_disk.volume_attachment_hana_log, - google_compute_attached_disk.volume_attachment_hana_log_custom, - google_compute_attached_disk.volume_attachment_hana_shared, - google_compute_attached_disk.volume_attachment_hana_shared_custom, - google_compute_attached_disk.volume_attachment_usr_sap, - google_compute_attached_disk.volume_attachment_sapmnt, - google_compute_attached_disk.volume_attachment_swap, - google_compute_attached_disk.volume_attachment_software, - google_compute_attached_disk.volume_attachment_anydb, - google_compute_attached_disk.volume_attachment_anydb_custom - ] - - connection { - type = "ssh" - user = "admin" - host = google_compute_instance.host.network_interface[0].network_ip - private_key = var.module_var_host_ssh_private_key - bastion_host = var.module_var_bastion_ip - bastion_port = var.module_var_bastion_ssh_port - bastion_user = var.module_var_bastion_user - bastion_private_key = var.module_var_bastion_private_ssh_key - #bastion_host_key = tls_private_key.bastion_ssh.public_key_openssh - } - - # Path must already exist and must not use Bash shell special variable, e.g. cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. 
- # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change - # - # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset - # - # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks - #### - - echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)" - vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n') - echo "" - - ####### - # Create expandable LVM Logical Volume, using single or multiple physical disk volumes - # Default is 64K for the stripe size (aka. 
-    #
-    # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
-    #
-    # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
-    # IMPORTANT: Correct setting of this stripe size has impact on performance of OS and Applications read/write
-    #######
-
-    # Count number of LVM Physical Volumes in the LVM Volume Group
-    count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
-    # Create LVM Logical Volume
-    # Stripe across all LVM Physical Volumes available in the LVM Volume Group
-    echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size extent size (block size)"
-    lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Create File System formatting for the LVM Logical Volume
-    # Filesystem is either XFS or EXT4
-    #######
-
-    echo "Create File System formatting for the LVM Logical Volume"
-    mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Permanent mount point
-    #######
-
-    # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
-    # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems.
-
-    # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure
-    # that the network link is ready before the operating system attempts to mount the disk.
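-    # For illustration only - a hypothetical fstab entry for an iSCSI-backed disk, with a placeholder UUID, would look like:
-    #   UUID=0a1b2c3d-ffff-4eee-9ddd-8ccc7bbb6aaa  /hana/data  xfs  defaults,noatime,_netdev  0 0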
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
-    # With only 4 KiB Page Size, only 2 in-memory log buffers are available, so increasing each buffer's size (default 32k) may improve performance
-    mount_options="defaults,noatime"
-    #mount_options="defaults,logbsize=256k"
-
-    # Workaround: the while loop runs in a subshell, so variables set inside it (the volume group target lists) cannot be re-used afterwards
-    while IFS= read -r line
-    do
-      disk_id=$(echo $line | awk '{ print $1}')
-      disk_capacity_gb=$(echo $line | awk '{ print $2}')
-      if [[ $existing_disks_list = *"$disk_id"* ]]
-      then
-        echo "No action on existing formatted /dev/$disk_id"
-      elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]]
-      then
-        echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB"
-        #parted --script /dev/$disk_id \
-        #  mklabel gpt \
-        #  mkpart primary $filesystem_format 1MiB 100% \
-        #  name 1 $physical_partition_name
-        parted --script /dev/$disk_id mklabel gpt
-        echo "Format Disk Partition with File System, with block size $block_size_definition"
-        mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id
-        echo "Write Mount Points to Linux File System Table"
-        PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue)
-        echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0" >> /etc/fstab
-        echo ""
-      fi
-    done <<< "$(echo -e "$physical_disks_list_with_gigabytes")"
-
-}
-
-
-
-
-#############################################
-# Swap file or partition
-#############################################
-
-function create_swap_file() {
-
-  echo "Create swapfile"
-
-  swap_gb="$1"
-  swap_bs="128"
-
-  swap_calc_bs=$swap_bs"M"
-  swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))"
-  dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count
-  chmod 600 /swapfile
-  mkswap /swapfile
-  swapon /swapfile
-  echo '/swapfile swap swap defaults 0 0' >> /etc/fstab
-  swapon --show
-  free -h
-
-}
-
-
-function create_swap_partition() {
-
-  find_swap_partition_by_size="$1"
-
-  physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}')
-
-  while IFS= read -r line
-  do
-    disk_id=$(echo $line | awk '{ print $1}')
-    disk_capacity_gb=$(echo $line | awk '{ print $2}')
-    if [[ $existing_disks_list = *"$disk_id"* ]]
-    then
-      echo "No action on existing formatted /dev/$disk_id"
-    elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]]
-    then
-      echo "Create swap partition"
-      mkswap /dev/$disk_id
-      swapon /dev/$disk_id
-      echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab
-      swapon --show
-      free -h
-      echo ""
-      break
-    fi
-  done <<< "$(echo -e "$physical_disks_list_with_gigabytes")"
-
-}
-
-
-
-
-#############################################
-# Verify/Debug
-#############################################
-
-storage_debug="false"
-
-function storage_debug_run() {
-
-if [ "$storage_debug" == "true" ]
-then
-
-  echo "--- Show Mount points ---"
-  df -h
-  printf "\n----------------\n\n"
-
-  echo "--- Show /etc/fstab file ---"
-  cat /etc/fstab
-  printf "\n----------------\n\n"
-
-  echo "--- Show Block devices ---"
-  blkid
-  printf "\n----------------\n\n"
-
-  echo "--- Show Block devices information ---"
-  lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC
-  printf "\n----------------\n\n"
-
-  echo "--- Show Hardware List of Disks and Volumes ---"
-  lshw -class disk -class volume
-  ###lshw -json -class disk -class volume | jq '[.logicalname,
.configuration.sectorsize, .configuration.logicalsectorsize]' - ###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - echo 'Install jq' - if [ ! -f /usr/local/bin/jq ]; then curl -L 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ 
"${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_nfs_boolean_sapmnt} == "false" ]] && [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - elif [[ ${var.module_var_nfs_boolean_sapmnt} == "true" ]] - then - # Establish Google Filestore Mount via GCP VPC Subnet Private IP - google_filestore_mount_fqdn_sapmnt='${var.module_var_nfs_boolean_sapmnt ? 
var.module_var_nfs_fqdn_sapmnt : "null"}' - - # Install NFS - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install nfs-utils ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm nfs-client ; fi - - # Mount Google Filestore via IP using NFSv3 - https://cloud.google.com/filestore/docs/mounting-fileshares#linux:-mount - echo "Mounting NFS for /sapmnt to Google Filestore Mount Target DNS Name: $google_filestore_mount_fqdn_sapmnt" - #sudo mount -t nfs3 -o nfsvers=3,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=3,resvport,_netdev,rw,intr $google_filestore_mount_fqdn_sapmnt /sapmnt - echo "# fstab entries for NFS" >> /etc/fstab - echo "$google_filestore_mount_fqdn_sapmnt /sapmnt nfs nfsvers=3,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,resvport,_netdev,rw,intr 0 0" >> /etc/fstab - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/gcp_ce_vm/host_provision/build_os_prepare.tf b/gcp_ce_vm/host_provision/build_os_prepare.tf index 8abec9c..a4186ba 100644 --- a/gcp_ce_vm/host_provision/build_os_prepare.tf +++ b/gcp_ce_vm/host_provision/build_os_prepare.tf @@ -2,18 +2,7 @@ resource "null_resource" "build_script_os_prepare" { depends_on = [ - google_compute_attached_disk.volume_attachment_hana_data, - google_compute_attached_disk.volume_attachment_hana_data_custom, - google_compute_attached_disk.volume_attachment_hana_log, - google_compute_attached_disk.volume_attachment_hana_log_custom, - google_compute_attached_disk.volume_attachment_hana_shared, - google_compute_attached_disk.volume_attachment_hana_shared_custom, - google_compute_attached_disk.volume_attachment_usr_sap, - google_compute_attached_disk.volume_attachment_sapmnt, - google_compute_attached_disk.volume_attachment_swap, - google_compute_attached_disk.volume_attachment_software, - google_compute_attached_disk.volume_attachment_anydb, - google_compute_attached_disk.volume_attachment_anydb_custom + google_compute_attached_disk.volume_attachment ] connection { diff --git a/gcp_ce_vm/host_provision/build_os_user_root.tf b/gcp_ce_vm/host_provision/build_os_user_root.tf index b05ce1b..4d68ff3 100644 --- a/gcp_ce_vm/host_provision/build_os_user_root.tf +++ b/gcp_ce_vm/host_provision/build_os_user_root.tf @@ -3,7 +3,6 @@ resource "null_resource" "build_script_os_user_root" { depends_on = [ null_resource.dns_resolv_files, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare ] diff --git a/gcp_ce_vm/host_provision/host.tf b/gcp_ce_vm/host_provision/host.tf index 6b8e2e7..adc5f7e 100644 --- a/gcp_ce_vm/host_provision/host.tf +++ b/gcp_ce_vm/host_provision/host.tf @@ -40,85 +40,11 @@ resource "google_compute_instance" "host" { # Attach GCP Zonal Persistent Disk block storage volumes to host -resource "google_compute_attached_disk" "volume_attachment_hana_data" { - count = length(google_compute_disk.block_volume_hana_data_voltype.*.id) - disk = google_compute_disk.block_volume_hana_data_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_hana_data_custom" { - 
count = length(google_compute_disk.block_volume_hana_data_custom.*.id) - disk = google_compute_disk.block_volume_hana_data_custom[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_hana_log" { - count = length(google_compute_disk.block_volume_hana_log_voltype.*.id) - disk = google_compute_disk.block_volume_hana_log_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_hana_log_custom" { - count = length(google_compute_disk.block_volume_hana_log_custom.*.id) - disk = google_compute_disk.block_volume_hana_log_custom[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_hana_shared" { - count = length(google_compute_disk.block_volume_hana_shared_voltype.*.id) - disk = google_compute_disk.block_volume_hana_shared_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_hana_shared_custom" { - count = length(google_compute_disk.block_volume_hana_shared_custom.*.id) - disk = google_compute_disk.block_volume_hana_shared_custom[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_anydb" { - count = length(google_compute_disk.block_volume_anydb_voltype.*.id) - disk = google_compute_disk.block_volume_anydb_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_anydb_custom" { - count = length(google_compute_disk.block_volume_anydb_custom.*.id) - disk = google_compute_disk.block_volume_anydb_custom[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_usr_sap" { - count = length(google_compute_disk.block_volume_usr_sap_voltype.*.id) - disk = google_compute_disk.block_volume_usr_sap_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_sapmnt" { - count = length(google_compute_disk.block_volume_sapmnt_voltype.*.id) - disk = google_compute_disk.block_volume_sapmnt_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} - -resource "google_compute_attached_disk" "volume_attachment_swap" { - count = length(google_compute_disk.block_volume_swap_voltype.*.id) - disk = google_compute_disk.block_volume_swap_voltype[count.index].id - instance = google_compute_instance.host.id - mode = "READ_WRITE" -} +resource "google_compute_attached_disk" "volume_attachment" { +# count = length(google_compute_disk.block_volume.*.id) + for_each = google_compute_disk.block_volume -resource "google_compute_attached_disk" "volume_attachment_software" { - disk = google_compute_disk.block_volume_software_voltype.id + disk = each.value.id instance = google_compute_instance.host.id mode = "READ_WRITE" } diff --git a/gcp_ce_vm/host_provision/host_block_storage.tf b/gcp_ce_vm/host_provision/host_block_storage.tf index f0efa42..2858b8a 100644 --- a/gcp_ce_vm/host_provision/host_block_storage.tf +++ b/gcp_ce_vm/host_provision/host_block_storage.tf @@ -6,145 +6,25 @@ # pd-ssd, Performance (SSD) persistent disks # pd-extreme, 
Extreme persistent disks +resource "google_compute_disk" "block_volume" { +# count = sum([ for storage_item in var.module_var_storage_definition: try(storage_item.disk_count,1) ]) -resource "google_compute_disk" "block_volume_hana_data_voltype" { - count = var.module_var_disk_volume_type_hana_data != "custom" ? var.module_var_disk_volume_count_hana_data : 0 + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk + } zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-data-${count.index}" - type = var.module_var_disk_volume_type_hana_data - size = var.module_var_disk_volume_capacity_hana_data - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_hana_data_custom" { - count = var.module_var_disk_volume_type_hana_data == "custom" ? var.module_var_disk_volume_count_hana_data : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-data-${count.index}" - type = var.module_var_disk_volume_type_hana_data - size = var.module_var_disk_volume_capacity_hana_data - physical_block_size_bytes = 4096 -# interface = "SCSI" - provisioned_iops = var.module_var_disk_volume_iops_hana_data -} - - - -resource "google_compute_disk" "block_volume_hana_log_voltype" { - count = var.module_var_disk_volume_type_hana_log != "custom" ? var.module_var_disk_volume_count_hana_log : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-log-${count.index}" - type = var.module_var_disk_volume_type_hana_log - size = var.module_var_disk_volume_capacity_hana_log - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_hana_log_custom" { - count = var.module_var_disk_volume_type_hana_log == "custom" ? var.module_var_disk_volume_count_hana_log : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-log-${count.index}" - type = var.module_var_disk_volume_type_hana_log - size = var.module_var_disk_volume_capacity_hana_log - physical_block_size_bytes = 4096 -# interface = "SCSI" - provisioned_iops = var.module_var_disk_volume_iops_hana_log -} - - - -resource "google_compute_disk" "block_volume_hana_shared_voltype" { - count = var.module_var_disk_volume_type_hana_shared != "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-shared-${count.index}" - type = var.module_var_disk_volume_type_hana_shared - size = var.module_var_disk_volume_capacity_hana_shared - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_hana_shared_custom" { - count = var.module_var_disk_volume_type_hana_shared == "custom" ? 
var.module_var_disk_volume_count_hana_shared : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-hana-shared-${count.index}" - type = var.module_var_disk_volume_type_hana_shared - size = var.module_var_disk_volume_capacity_hana_shared - physical_block_size_bytes = 4096 -# interface = "SCSI" - provisioned_iops = var.module_var_disk_volume_iops_hana_shared -} - - - -resource "google_compute_disk" "block_volume_anydb_voltype" { - count = var.module_var_disk_volume_type_anydb != "custom" ? var.module_var_disk_volume_count_anydb : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-anydb-${count.index}" - type = var.module_var_disk_volume_type_anydb - size = var.module_var_disk_volume_capacity_anydb - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_anydb_custom" { - count = var.module_var_disk_volume_type_anydb == "custom" ? var.module_var_disk_volume_count_anydb : 0 - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-anydb-${count.index}" - type = var.module_var_disk_volume_type_anydb - size = var.module_var_disk_volume_capacity_anydb - physical_block_size_bytes = 4096 -# interface = "SCSI" - provisioned_iops = var.module_var_disk_volume_iops_anydb -} - - - -resource "google_compute_disk" "block_volume_usr_sap_voltype" { - count = var.module_var_disk_volume_count_usr_sap - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-usr-sap-${count.index}" - type = var.module_var_disk_volume_type_usr_sap - size = var.module_var_disk_volume_capacity_usr_sap - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_sapmnt_voltype" { - count = var.module_var_disk_volume_count_sapmnt - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-sapmnt-${count.index}" - type = var.module_var_disk_volume_type_sapmnt - size = var.module_var_disk_volume_capacity_sapmnt - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_swap_voltype" { - count = var.module_var_disk_volume_count_swap - - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-swap-${count.index}" - type = var.module_var_disk_volume_type_swap - size = var.module_var_disk_volume_capacity_swap - physical_block_size_bytes = 4096 -# interface = "SCSI" -} - -resource "google_compute_disk" "block_volume_software_voltype" { - zone = var.module_var_gcp_region_zone - name = "${var.module_var_virtual_machine_hostname}-volume-software" - type = var.module_var_disk_volume_type_software - size = var.module_var_disk_volume_capacity_software + name = "${var.module_var_virtual_machine_hostname}-vol-${each.value.name}" + type = each.value.disk_type + size = each.value.disk_size physical_block_size_bytes = 4096 # interface = "SCSI" + provisioned_iops = each.value.disk_iops } diff --git a/gcp_ce_vm/host_provision/module_variables.tf b/gcp_ce_vm/host_provision/module_variables.tf index 6e5352b..d661526 100644 --- a/gcp_ce_vm/host_provision/module_variables.tf +++ b/gcp_ce_vm/host_provision/module_variables.tf @@ -22,144 +22,8 @@ variable "module_var_host_ssh_private_key" {} variable "module_var_virtual_machine_hostname" {} variable "module_var_virtual_machine_profile" {} +variable "module_var_storage_definition" {} 
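For reference, a minimal sketch (not part of this patch) of the list-of-maps shape that the new "module_var_storage_definition" input and the for_each expressions above expect. The names, sizes and disk types below are hypothetical, and "disk_count" and "disk_iops" are optional because the for expressions wrap them in try():

  storage_definition = [
    {
      name       = "hana_data"
      disk_count = 2
      disk_size  = 384
      disk_type  = "pd-ssd"
    },
    {
      name      = "swap"
      disk_size = 2
      disk_type = "pd-balanced"
    }
  ]

The flatten/for expression keys each disk as name-index with underscores replaced by hyphens, so the sketch above would yield the keys "hana-data-0", "hana-data-1" and "swap-0", each becoming one google_compute_disk and one matching google_compute_attached_disk.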
-variable "module_var_disk_volume_type_hana_data" {} -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_disk_volume_iops_hana_data" { - default = null -} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_type_hana_log" {} -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_disk_volume_iops_hana_log" { - default = null -} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_type_hana_shared" {} -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_disk_volume_iops_hana_shared" { - default = null -} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - -variable "module_var_disk_volume_type_anydb" {} -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { +variable "module_var_disable_ip_anti_spoofing" { default = false } -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_type_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_type_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} -variable 
"module_var_nfs_boolean_sapmnt" {} -variable "module_var_nfs_fqdn_sapmnt" {} - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_type_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - - -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 525 -} -variable "module_var_disk_volume_type_software" { -} - diff --git a/msazure_vm/host_provision/build_execution.tf b/msazure_vm/host_provision/build_execution.tf index 7bdda6c..8873bd5 100644 --- a/msazure_vm/host_provision/build_execution.tf +++ b/msazure_vm/host_provision/build_execution.tf @@ -5,15 +5,8 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ null_resource.dns_resolv_files, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_hana_data, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_hana_log, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_hana_shared, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_usr_sap, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_sapmnt, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_swap, - azurerm_virtual_machine_data_disk_attachment.volume_attachment_software + azurerm_virtual_machine_data_disk_attachment.volume_attachment ] connection { @@ -49,7 +42,6 @@ resource "null_resource" "execute_os_scripts" { "echo 'Show HOME directory for reference Shell scripts were transferred'", "ls -lha $HOME", "/root/terraform_dig.sh", - "/root/terraform_fs_init.sh", "/root/terraform_os_prep.sh" ] } diff --git a/msazure_vm/host_provision/build_filesystem_setup.tf b/msazure_vm/host_provision/build_filesystem_setup.tf deleted file mode 100644 index 2b83cbb..0000000 --- a/msazure_vm/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,656 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - connection { - type = "ssh" - user = "root" - host = azurerm_linux_virtual_machine.host.private_ip_address - private_key = var.module_var_host_ssh_private_key - bastion_host = var.module_var_bastion_ip - bastion_port = var.module_var_bastion_ssh_port - bastion_user = var.module_var_bastion_user - bastion_private_key = var.module_var_bastion_private_ssh_key - #bastion_host_key = tls_private_key.bastion_ssh.public_key_openssh - } - - # Path must already exist and must not use Bash shell special variable, e.g. 
cannot use $HOME/file.sh
-  # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory"
-  # https://www.terraform.io/language/resources/provisioners/file#destination-paths
-  provisioner "file" {
-    destination = "terraform_fs_init.sh"
-    content = <1 && $2 = "") print "/dev/"$1; else print $0}')
-    physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}')
-    physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}')
-    echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt
-
-
-    ####
-    # Create LVM Physical Volumes
-    #
-    # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes
-    #
-    # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overridden by --dataalignment.
-    # Default 1MB offset from disk start before first LVM PV Physical Extent is used,
-    # and an additional offset after can be set using --dataalignmentoffset.
-    #
-    # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined
-    # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset
-    ####
-
-
-    ####
-    # MS Azure other storage notes and commands
-    ####
-    #
-    # Output IMDS to JSON
-    #curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute?api-version=2020-09-01" > imds_output.json
-    #
-    # Output IMDS to single string
-    #curl -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2020-09-01&format=text"
-    #
-    # Check Azure VM LUNs
-    #ls -lha /dev/disk/azure/scsi1/lun*
-    #
-    # AZURE: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 1 (enable)
-    # this results in LVM pvcreate error:
-    # Cannot use /dev/sdX: device is a multipath component
-    #
-    # AZURE: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 0 (disable)
-    # this results in LVM pvcreate error:
-    # Can't open /dev/sdX exclusively. Mounted filesystem?
-    # Can't open /dev/sdX exclusively. Mounted filesystem?
-    #
-    # AZURE: Show multipath devices:
-    # multipath -l
-    #
-    # AZURE: Map Disk Volumes to Device Mapper (DM) Multipath
-    # multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\"
-
-
-
-    # Workaround: the while loop runs in a subshell, so variables set inside it (the volume group target lists) cannot be re-used afterwards
-    while IFS= read -r line
-    do
-      disk_id=$(echo $line | awk '{ print $1}')
-      disk_capacity_gb=$(echo $line | awk '{ print $2}')
-      if [[ $existing_disks_list = *"$disk_id"* ]]
-      then
-        echo "No action on existing formatted /dev/$disk_id"
-      elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]]
-      then
-        echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment"
-        if [[ $(cat /run/cloud-init/instance-data.json | jq -r '.v1.platform') == "azure" && $azure_disks_multipath_bool == "true" ]]
-        then
-          echo "MS Azure detected: Detecting Multipath DM"
-          multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\")
-          multipath_dm_name=$(printf "$multipath_map" | awk "/$disk_id/ { print \$2 }")
-          echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for pvcreate"
-          pvcreate "/dev/$multipath_dm_name" --dataalignment $lvm_pv_data_alignment
-          echo "Adding /dev/$multipath_dm_name to a list for the LVM Volume Group for $mount_point"
-          lvm_volume_group_target_list=$(echo "/dev/$multipath_dm_name" & echo $lvm_volume_group_target_list)
-        else
-          pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment
-          echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point"
-          lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list)
-        fi
-        echo ""
-      fi
-    done <<< "$(echo -e "$physical_disks_list_with_gigabytes")"
-
-    ####
-    # Create LVM Volume Groups and add LVM Physical Volumes
-    # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used
-    # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change
-    #
-    # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined
-    # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset
-    #
-    # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks
-    ####
-
-    echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)"
-    vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n')
-
-    echo ""
-
-    #######
-    # Create expandable LVM Logical Volume, using single or multiple physical disk volumes
-    # Default is 64K for the stripe size (aka. block size)
-    #
-    # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
-    #
-    # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
-    # IMPORTANT: Correct setting of this stripe size has impact on performance of OS and Applications read/write
-    #######
-
-    # Count number of LVM Physical Volumes in the LVM Volume Group
-    count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
-    # Create LVM Logical Volume
-    # Stripe across all LVM Physical Volumes available in the LVM Volume Group
-    echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size extent size (block size)"
-    lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Create File System formatting for the LVM Logical Volume
-    # Filesystem is either XFS or EXT4
-    #######
-
-    echo "Create File System formatting for the LVM Logical Volume"
-    mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Permanent mount point
-    #######
-
-    # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
-    # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems.
-
-    # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure
-    # that the network link is ready before the operating system attempts to mount the disk.
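-    # For illustration only - with hypothetical device names, the multipath mapping can be cross-checked before mounting:
-    #   multipath -l                 # list Device Mapper multipath devices
-    #   ls -l /dev/mapper/           # prefer a /dev/mapper/<name> path over /dev/sdX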
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - azure_disks_multipath_bool="$6" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
-    # With only 4 KiB Page Size, only 2 in-memory log buffers are available, so increasing each buffer's size (default 32k) may improve performance
-    mount_options="defaults,noatime"
-    #mount_options="defaults,logbsize=256k"
-
-    # Workaround: the while loop runs in a subshell, so variables set inside it (the volume group target lists) cannot be re-used afterwards
-    while IFS= read -r line
-    do
-      disk_id=$(echo $line | awk '{ print $1}')
-      disk_capacity_gb=$(echo $line | awk '{ print $2}')
-      if [[ $existing_disks_list = *"$disk_id"* ]]
-      then
-        echo "No action on existing formatted /dev/$disk_id"
-      elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]]
-      then
-        echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB"
-        if [[ $(cat /run/cloud-init/instance-data.json | jq -r '.v1.platform') == "azure" && $azure_disks_multipath_bool == "true" ]]
-        then
-          echo "MS Azure detected: Detecting Multipath DM"
-          multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\")
-          multipath_dm_name=$(printf "$multipath_map" | awk "/$disk_id/ { print \$2 }")
-          echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for parted and mkfs"
-          parted --script /dev/$multipath_dm_name \
-            mklabel gpt \
-            mkpart primary $filesystem_format 1MiB 100% \
-            name 1 $physical_partition_name
-          echo "Format Disk Partition with File System, with block size $block_size_definition"
-          partition_id=$(lsblk /dev/$disk_id -o NAME,TYPE --raw | awk "/part/ { print \$1 }" | awk '!/sd/')
-          echo "Disk Partition ID = $partition_id"
-          mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/mapper/$partition_id
-          echo "Write Mount Points to Linux File System Table"
-          # PhysicalDiskUUID=$(blkid /dev/$multipath_dm_name -s PTUUID -o value)
-          echo "/dev/mapper/$partition_id $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab
-        else
-          parted --script /dev/$disk_id \
-            mklabel gpt \
-            mkpart primary $filesystem_format 1MiB 100% \
-            name 1 $physical_partition_name
-          echo "Format Disk Partition with File System, with block size $block_size_definition"
-          mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id
-          echo "Write Mount Points to Linux File System Table"
-          PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue)
-          echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab
-        fi
-        echo ""
-      fi
-    done <<< "$(echo -e "$physical_disks_list_with_gigabytes")"
-
-}
-
-
-
-
-#############################################
-# Swap file or partition
-#############################################
-
-function create_swap_file() {
-
-  echo "Create swapfile (on /usr when using Azure)"
-
-  swap_gb="$1"
-  swap_bs="128"
-
-  swap_calc_bs=$swap_bs"M"
-  swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))"
-  dd if=/dev/zero of=/usr/swapfile bs=$swap_calc_bs count=$swap_calc_count
-  chmod 600 /usr/swapfile
-  mkswap /usr/swapfile
-  swapon /usr/swapfile
-  echo '/usr/swapfile swap swap defaults 0 0' >> /etc/fstab
-  swapon --show
-  free -h
-
-}
-
-
-function create_swap_partition() {
-
-  find_swap_partition_by_size="$1"
-
-  physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}')
-
-  while IFS= read -r line
-  do
-    disk_id=$(echo $line | awk '{ print $1}')
-    disk_capacity_gb=$(echo $line | awk '{ print $2}')
-    if [[
$existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - ###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - echo 'Install jq' - if [ ! -f /usr/local/bin/jq ]; then curl -L 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. 
IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_data))}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_data))}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_log))}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_log))}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_shared))}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" "${can(regex("^[P].*",var.module_var_disk_volume_type_hana_shared))}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" 
"${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" "${can(regex("^[P].*",var.module_var_disk_volume_type_anydb))}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" "${can(regex("^[P].*",var.module_var_disk_volume_type_anydb))}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" "${can(regex("^[P].*",var.module_var_disk_volume_type_usr_sap))}" - fi - - - if [[ ${var.module_var_nfs_boolean_sapmnt} == "false" ]] && [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" "${can(regex("^[P].*",var.module_var_disk_volume_type_sapmnt))}" - elif [[ ${var.module_var_nfs_boolean_sapmnt} == "true" ]] - then - # Establish Azure Files Mount Target DNS Name - azure_files_mount_fqdn_sapmnt='${var.module_var_nfs_boolean_sapmnt ? var.module_var_nfs_fqdn_sapmnt : "null"}' - - # Recommend OS mount of the Azure Files via the DNS FQDN, which resolves to the IP Address of the Azure Files mount target - # Usually to increase network latency performance for SAP NetWeaver read/write operations, IP Addresses should be used - # However, the NFS protocol performs DNS lookup at mount time, and stores into the local host DNS cache - based on the DNS TTL defined by the Azure DNS Name Server - # As this activity is infrequent, it should not impact performance to set to the FQDN of the Azure Files Mount Target - - # Install NFS - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install nfs-utils ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm nfs-client ; fi - - # Mount Azure Files via DNS FQDN - echo "Mounting NFS for /sapmnt to Azure Files Mount Target DNS Name: $azure_files_mount_fqdn_sapmnt" - #sudo mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport $azure_files_mount_fqdn_sapmnt /sapmnt - echo "# fstab entries for NFS" >> /etc/fstab - echo "$azure_files_mount_fqdn_sapmnt /sapmnt nfs4 nfsvers=4.1,rsize=262144,wsize=262144,hard,timeo=600,retrans=2,sec=sys,noatime,proto=tcp,namlen=255 0 0" >> /etc/fstab - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - echo "Check if Multipath was activated, then alter setup of /software mount point (which is static set to Premium_LRS to avoid heavy installation delay)" - if multipath; then - use_multipath="true" - else - use_multipath="false" - fi - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" "$use_multipath" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/msazure_vm/host_provision/build_os_prepare.tf b/msazure_vm/host_provision/build_os_prepare.tf index 408d2e7..c821473 100644 --- a/msazure_vm/host_provision/build_os_prepare.tf +++ 
b/msazure_vm/host_provision/build_os_prepare.tf @@ -1,6 +1,10 @@ resource "null_resource" "build_script_os_prepare" { + depends_on = [ + azurerm_virtual_machine_data_disk_attachment.volume_attachment + ] + connection { type = "ssh" user = "root" diff --git a/msazure_vm/host_provision/host.tf b/msazure_vm/host_provision/host.tf index c243e7f..247a2c5 100644 --- a/msazure_vm/host_provision/host.tf +++ b/msazure_vm/host_provision/host.tf @@ -93,74 +93,12 @@ resource "azurerm_linux_virtual_machine" "host" { # Attach block disk volumes to host -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_hana_data" { - count = length(azurerm_managed_disk.block_volume_hana_data_voltype.*.id) +resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment" { +# count = length([for vol in azurerm_managed_disk.block_volume : vol.name]) + for_each = azurerm_managed_disk.block_volume - managed_disk_id = azurerm_managed_disk.block_volume_hana_data_voltype[count.index].id + managed_disk_id = each.value.id virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(10 + count.index) + lun = tostring(10 + index(keys(azurerm_managed_disk.block_volume),each.key)) # Maximum LUN range is 63 caching = "None" } - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_hana_log" { - count = length(azurerm_managed_disk.block_volume_hana_log_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_hana_log_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(20 + count.index) - caching = "None" -} - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_hana_shared" { - count = length(azurerm_managed_disk.block_volume_hana_shared_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_hana_shared_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(30 + count.index) - caching = "ReadOnly" -} - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_anydb" { - count = length(azurerm_managed_disk.block_volume_anydb_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_anydb_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(40 + count.index) - caching = "ReadOnly" -} - - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_usr_sap" { - count = length(azurerm_managed_disk.block_volume_usr_sap_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_usr_sap_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(45 + count.index) - caching = "ReadWrite" -} - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_sapmnt" { - count = var.module_var_nfs_boolean_sapmnt ? 
0 : length(azurerm_managed_disk.block_volume_sapmnt_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_sapmnt_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(50 + count.index) - caching = "ReadWrite" -} - -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_swap" { - count = length(azurerm_managed_disk.block_volume_swap_voltype.*.id) - - managed_disk_id = azurerm_managed_disk.block_volume_swap_voltype[count.index].id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(55 + count.index) - caching = "ReadWrite" -} - -# Maximum LUN range is 63, set to last -resource "azurerm_virtual_machine_data_disk_attachment" "volume_attachment_software" { - managed_disk_id = azurerm_managed_disk.block_volume_software_voltype.id - virtual_machine_id = azurerm_linux_virtual_machine.host.id - lun = tostring(63) - caching = "ReadWrite" -} diff --git a/msazure_vm/host_provision/host_block_storage.tf b/msazure_vm/host_provision/host_block_storage.tf index ad26752..b6e4a83 100644 --- a/msazure_vm/host_provision/host_block_storage.tf +++ b/msazure_vm/host_provision/host_block_storage.tf @@ -1,163 +1,31 @@ # Create Block Storage -resource "azurerm_managed_disk" "block_volume_hana_data_voltype" { - count = var.module_var_disk_volume_type_hana_data != "custom" ? var.module_var_disk_volume_count_hana_data : 0 +resource "azurerm_managed_disk" "block_volume" { +# count = sum([ for storage_item in var.module_var_storage_definition: try(storage_item.disk_count,1) ]) - name = "${var.module_var_host_name}-volume-hana-data-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_hana_data)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_hana_data)) ? "StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_hana_data)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_hana_data)) ? var.module_var_disk_volume_type_hana_data : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_hana_data - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk } -} - - -resource "azurerm_managed_disk" "block_volume_hana_log_voltype" { - count = var.module_var_disk_volume_type_hana_log != "custom" ? var.module_var_disk_volume_count_hana_log : 0 - name = "${var.module_var_host_name}-volume-hana-log-${count.index}" + name = "${var.module_var_host_name}-vol-${each.value.name}" resource_group_name = local.target_resource_group_name location = var.module_var_az_location_region // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_hana_log)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_hana_log)) ? 
"StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_hana_log)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_hana_log)) ? var.module_var_disk_volume_type_hana_log : null - + storage_account_type = can(regex("^P.*", try(each.value.disk_type, null))) ? "Premium_LRS" : can(regex("^E.*", try(each.value.disk_type, null))) ? "StandardSSD_LRS" : can(regex("^S.*", try(each.value.disk_type, null))) ? "Standard_LRS" : "error" + tier = can(regex("^[P].*",try(each.value.disk_type, null))) ? try(each.value.disk_type, null) : null create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_hana_log - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "azurerm_managed_disk" "block_volume_hana_shared_voltype" { - count = var.module_var_disk_volume_type_hana_shared != "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - name = "${var.module_var_host_name}-volume-hana-shared-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_hana_shared)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_hana_shared)) ? "StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_hana_shared)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_hana_shared)) ? var.module_var_disk_volume_type_hana_shared : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_hana_shared - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "azurerm_managed_disk" "block_volume_anydb_voltype" { - count = var.module_var_disk_volume_type_anydb != "custom" ? var.module_var_disk_volume_count_anydb : 0 - - name = "${var.module_var_host_name}-volume-anydb-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_anydb)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_anydb)) ? "StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_anydb)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_anydb)) ? var.module_var_disk_volume_type_anydb : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_anydb - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - + disk_size_gb = each.value.disk_size -resource "azurerm_managed_disk" "block_volume_usr_sap_voltype" { - count = var.module_var_disk_volume_count_usr_sap - - name = "${var.module_var_host_name}-volume-usr-sap-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_usr_sap)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_usr_sap)) ? 
"StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_usr_sap)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_usr_sap)) ? var.module_var_disk_volume_type_usr_sap : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_usr_sap - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "azurerm_managed_disk" "block_volume_sapmnt_voltype" { - count = var.module_var_nfs_boolean_sapmnt ? 0 : var.module_var_disk_volume_count_sapmnt - - name = "${var.module_var_host_name}-volume-sapmnt-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_sapmnt)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_sapmnt)) ? "StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_sapmnt)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_sapmnt)) ? var.module_var_disk_volume_type_sapmnt : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_sapmnt - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "azurerm_managed_disk" "block_volume_swap_voltype" { - count = var.module_var_disk_volume_count_swap - - name = "${var.module_var_host_name}-volume-swap-${count.index}" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - // Premium SSD size (P), Standard SSD size (E), Standard HDD size (S) - storage_account_type = can(regex("^P.*", var.module_var_disk_volume_type_swap)) ? "Premium_LRS" : can(regex("^E.*", var.module_var_disk_volume_type_swap)) ? "StandardSSD_LRS" : can(regex("^S.*", var.module_var_disk_volume_type_swap)) ? "Standard_LRS" : "error" - tier = can(regex("^[P].*",var.module_var_disk_volume_type_swap)) ? 
var.module_var_disk_volume_type_swap : null - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_swap - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "azurerm_managed_disk" "block_volume_software_voltype" { - - name = "${var.module_var_host_name}-volume-software" - resource_group_name = local.target_resource_group_name - location = var.module_var_az_location_region - - storage_account_type = "Premium_LRS" - create_option = "Empty" - disk_size_gb = var.module_var_disk_volume_capacity_software + disk_iops_read_write = each.value.disk_iops # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP timeouts { diff --git a/msazure_vm/host_provision/module_variables.tf b/msazure_vm/host_provision/module_variables.tf index b091e5d..c715647 100644 --- a/msazure_vm/host_provision/module_variables.tf +++ b/msazure_vm/host_provision/module_variables.tf @@ -42,149 +42,12 @@ variable "module_var_bastion_connection_sg_id" {} variable "module_var_az_vm_instance" {} -variable "module_var_disk_volume_type_hana_data" {} -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_disk_volume_iops_hana_data" { - default = null -} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_type_hana_log" {} -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_disk_volume_iops_hana_log" { - default = null -} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_type_hana_shared" {} -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_disk_volume_iops_hana_shared" { - default = null -} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - -variable "module_var_disk_volume_type_anydb" {} -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} 
-variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { - default = false -} -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_type_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_type_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} -variable "module_var_nfs_boolean_sapmnt" {} -variable "module_var_nfs_fqdn_sapmnt" {} - - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_type_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - - -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 525 -} - variable "module_var_dns_zone_name" {} variable "module_var_dns_root_domain_name" {} +variable "module_var_storage_definition" {} + variable "module_var_disable_ip_anti_spoofing" { default = false } From 8a07ef6e9a4307a1cbbf5bfcfd423757017cde3e Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Sat, 20 May 2023 09:29:34 +0100 Subject: [PATCH 07/25] fix: add ip forward to gcp --- gcp_ce_vm/host_provision/host.tf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gcp_ce_vm/host_provision/host.tf b/gcp_ce_vm/host_provision/host.tf index adc5f7e..7db6e23 100644 --- a/gcp_ce_vm/host_provision/host.tf +++ b/gcp_ce_vm/host_provision/host.tf @@ -15,6 +15,8 @@ resource "google_compute_instance" "host" { } } + can_ip_forward = var.module_var_disable_ip_anti_spoofing // When disable the Anti IP Spoofing = true, then Can IP Forward = true + network_interface { # name = "${var.module_var_resource_prefix}-bastion-nic0" subnetwork = local.target_vpc_subnet_id From 4d4d988a13e351cb04619c1680eb455e5512f669 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Sat, 2 Sep 2023 14:38:06 +0100 Subject: [PATCH 08/25] fix: stability of gcp bastion --- gcp_ce_vm/bastion_inject/bastion_provision.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gcp_ce_vm/bastion_inject/bastion_provision.tf b/gcp_ce_vm/bastion_inject/bastion_provision.tf index 7892d32..aed53d3 100644 --- a/gcp_ce_vm/bastion_inject/bastion_provision.tf +++ b/gcp_ce_vm/bastion_inject/bastion_provision.tf @@ -3,7 +3,7 @@ resource "google_compute_instance" "bastion_host" { name = "${var.module_var_resource_prefix}-bastion" - machine_type = "e2-micro" + machine_type = "e2-standard-2" zone = var.module_var_gcp_region_zone boot_disk { From 7c125c06a86d54921743f002c56552f4eb82112b Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: 
Mon, 23 Oct 2023 14:54:53 +0100 Subject: [PATCH 09/25] fix: squid config ports --- .../build_proxy_web_forward_squid.tf | 47 ++++++++++--------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/ibmcloud_vs/powervs_interconnect_proxy_provision/build_proxy_web_forward_squid.tf b/ibmcloud_vs/powervs_interconnect_proxy_provision/build_proxy_web_forward_squid.tf index a42c85c..796f903 100644 --- a/ibmcloud_vs/powervs_interconnect_proxy_provision/build_proxy_web_forward_squid.tf +++ b/ibmcloud_vs/powervs_interconnect_proxy_provision/build_proxy_web_forward_squid.tf @@ -18,30 +18,35 @@ resource "null_resource" "squid_files" { # Adapt to list your (internal) IP networks from where browsing # should be allowed acl localnet src 0.0.0.1-0.255.255.255 # RFC 1122 "this" network (LAN) -acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) -acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) -acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines -acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) -acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) -acl localnet src fc00::/7 # RFC 4193 local private network range -acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines - -acl SSL_ports port 443 -acl Safe_ports port 80 # http -acl Safe_ports port 21 # ftp -acl Safe_ports port 443 # https -acl Safe_ports port 70 # gopher -acl Safe_ports port 210 # wais -acl Safe_ports port 1025-65535 # unregistered ports -acl Safe_ports port 280 # http-mgmt -acl Safe_ports port 488 # gss-http -acl Safe_ports port 591 # filemaker -acl Safe_ports port 777 # multiling http +acl localnet src 10.0.0.0/8 # RFC 1918 local private network (LAN) +acl localnet src 172.16.0.0/12 # RFC 1918 local private network (LAN) +acl localnet src 192.168.0.0/16 # RFC 1918 local private network (LAN) +acl localnet src fc00::/7 # RFC 4193 local private network range +acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines +acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines +acl localnet src 100.64.0.0/10 # RFC 6598 shared address space (CGN) +acl localnet src all + +acl FTP_ports port 21 20 +acl SSL_ports port 443 # ssl +acl SSL_ports port 8443 # ssl (os package mirrors) +acl Safe_ports port 22 # ssh +acl Safe_ports port 80 # http +acl Safe_ports port 443 # https +acl Safe_ports port 8443 # https alt +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +#acl Safe_ports port 1025-65535 # unregistered ports acl CONNECT method CONNECT # # Recommended minimum Access Permission configuration: # + # Deny requests to certain unsafe ports http_access deny !Safe_ports @@ -70,8 +75,8 @@ http_access allow localhost # And finally deny all other access to this proxy #http_access deny all -# ENABLE: public internet forward proxy for HTTP/HTTPS -http_access allow all +# OVERRIDE: public internet forward proxy for HTTP/HTTPS +#http_access allow all # Squid normally listens to port 3128 http_port ${var.module_var_proxy_port_squid} From 3de0245a69e73999b7cd7a5bd5693e136072b6b9 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Mon, 30 Oct 2023 20:57:07 +0000 Subject: [PATCH 10/25] fix: split create of powervs workspace and networks --- .../data_network_vpc_target_subnet.tf | 22 
----------- .../module_outputs.tf | 4 -- .../module_variables.tf | 10 +++++ .../module_versions.tf | 0 .../network_interconnect.tf | 4 +- .../network_power_group.tf | 11 ++---- .../data_network_vpc_target_subnet.tf | 11 ++++++ .../module_outputs.tf | 8 ++++ .../module_variables.tf | 0 .../module_versions.tf | 38 +++++++++++++++++++ .../power_service_instance.tf | 0 11 files changed, 72 insertions(+), 36 deletions(-) delete mode 100644 ibmcloud_powervs/account_bootstrap_addon/data_network_vpc_target_subnet.tf rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_networks}/module_outputs.tf (77%) create mode 100644 ibmcloud_powervs/account_bootstrap_powervs_networks/module_variables.tf rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_networks}/module_versions.tf (100%) rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_networks}/network_interconnect.tf (92%) rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_networks}/network_power_group.tf (75%) create mode 100644 ibmcloud_powervs/account_bootstrap_powervs_workspace/data_network_vpc_target_subnet.tf create mode 100644 ibmcloud_powervs/account_bootstrap_powervs_workspace/module_outputs.tf rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_workspace}/module_variables.tf (100%) create mode 100644 ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf rename ibmcloud_powervs/{account_bootstrap_addon => account_bootstrap_powervs_workspace}/power_service_instance.tf (100%) diff --git a/ibmcloud_powervs/account_bootstrap_addon/data_network_vpc_target_subnet.tf b/ibmcloud_powervs/account_bootstrap_addon/data_network_vpc_target_subnet.tf deleted file mode 100644 index 09efb59..0000000 --- a/ibmcloud_powervs/account_bootstrap_addon/data_network_vpc_target_subnet.tf +++ /dev/null @@ -1,22 +0,0 @@ - -# Select Subnet on the VPC in a specific Zone -### Also identifies target VPC and target Resource Group - -data "ibm_is_subnet" "vpc_subnet" { - name = var.module_var_ibmcloud_vpc_subnet_name // Requires unique name in account -} - -data "ibm_is_vpc" "vpc" { - name = data.ibm_is_subnet.vpc_subnet.vpc_name // Requires unique name in account -} - - -locals { - target_resource_group_id = data.ibm_is_subnet.vpc_subnet.resource_group - target_vpc_id = data.ibm_is_subnet.vpc_subnet.vpc - target_vpc_crn = data.ibm_is_vpc.vpc.crn - target_vpc_availability_zone = data.ibm_is_subnet.vpc_subnet.zone - target_vpc_subnet_range = data.ibm_is_subnet.vpc_subnet.ipv4_cidr_block - target_region = replace(data.ibm_is_subnet.vpc_subnet.zone, "/-[^-]*$/", "") - target_subnet_id = data.ibm_is_subnet.vpc_subnet.id -} diff --git a/ibmcloud_powervs/account_bootstrap_addon/module_outputs.tf b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_outputs.tf similarity index 77% rename from ibmcloud_powervs/account_bootstrap_addon/module_outputs.tf rename to ibmcloud_powervs/account_bootstrap_powervs_networks/module_outputs.tf index 4527122..c347777 100644 --- a/ibmcloud_powervs/account_bootstrap_addon/module_outputs.tf +++ b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_outputs.tf @@ -1,8 +1,4 @@ -output "output_power_group_guid" { - value = ibm_resource_instance.power_group.guid -} - output "output_power_group_networks" { value = compact([ ibm_pi_network.power_group_network_private.network_id, diff --git a/ibmcloud_powervs/account_bootstrap_powervs_networks/module_variables.tf 
b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_variables.tf new file mode 100644 index 0000000..6b135d0 --- /dev/null +++ b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_variables.tf @@ -0,0 +1,10 @@ + +variable "module_var_resource_prefix" {} + +variable "module_var_resource_group_id" {} + +variable "module_var_ibmcloud_power_zone" {} + +variable "module_var_ibmcloud_vpc_crn" {} + +variable "module_var_ibmcloud_powervs_workspace_guid" {} diff --git a/ibmcloud_powervs/account_bootstrap_addon/module_versions.tf b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf similarity index 100% rename from ibmcloud_powervs/account_bootstrap_addon/module_versions.tf rename to ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf diff --git a/ibmcloud_powervs/account_bootstrap_addon/network_interconnect.tf b/ibmcloud_powervs/account_bootstrap_powervs_networks/network_interconnect.tf similarity index 92% rename from ibmcloud_powervs/account_bootstrap_addon/network_interconnect.tf rename to ibmcloud_powervs/account_bootstrap_powervs_networks/network_interconnect.tf index 38f74fd..1fef33f 100644 --- a/ibmcloud_powervs/account_bootstrap_addon/network_interconnect.tf +++ b/ibmcloud_powervs/account_bootstrap_powervs_networks/network_interconnect.tf @@ -10,7 +10,7 @@ resource "ibm_pi_cloud_connection" "cloud_connection" { null_resource.sleep_temp_network ] - pi_cloud_instance_id = ibm_resource_instance.power_group.guid + pi_cloud_instance_id = var.module_var_ibmcloud_powervs_workspace_guid pi_cloud_connection_name = "${var.module_var_resource_prefix}-pwr-to-cld" pi_cloud_connection_global_routing = false pi_cloud_connection_speed = 200 // Mbps @@ -18,7 +18,7 @@ resource "ibm_pi_cloud_connection" "cloud_connection" { pi_cloud_connection_vpc_enabled = true pi_cloud_connection_vpc_crns = [ - local.target_vpc_crn + var.module_var_ibmcloud_vpc_crn ] pi_cloud_connection_networks = [ diff --git a/ibmcloud_powervs/account_bootstrap_addon/network_power_group.tf b/ibmcloud_powervs/account_bootstrap_powervs_networks/network_power_group.tf similarity index 75% rename from ibmcloud_powervs/account_bootstrap_addon/network_power_group.tf rename to ibmcloud_powervs/account_bootstrap_powervs_networks/network_power_group.tf index bde112d..0873250 100644 --- a/ibmcloud_powervs/account_bootstrap_addon/network_power_group.tf +++ b/ibmcloud_powervs/account_bootstrap_powervs_networks/network_power_group.tf @@ -9,12 +9,8 @@ # Create IBM Power Virtual Server Group Private Network Subnet resource "ibm_pi_network" "power_group_network_private" { - depends_on = [ - null_resource.sleep_temp - ] - pi_network_name = "${var.module_var_resource_prefix}-power-group-network-private" - pi_cloud_instance_id = ibm_resource_instance.power_group.guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list + pi_cloud_instance_id = var.module_var_ibmcloud_powervs_workspace_guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list pi_network_type = "vlan" pi_cidr = "192.168.0.32/27" pi_dns = ["127.0.0.1"] @@ -33,11 +29,10 @@ resource "null_resource" "sleep_temp_network" { # Create IBM Power Virtual Server Group Private Network Subnet for Management networking resource "ibm_pi_network" "power_group_network_mgmt" { depends_on = [ - null_resource.sleep_temp, ibm_pi_network.power_group_network_private ] pi_network_name = "${var.module_var_resource_prefix}-power-group-network-mgmt" - 
pi_cloud_instance_id = ibm_resource_instance.power_group.guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list + pi_cloud_instance_id = var.module_var_ibmcloud_powervs_workspace_guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list pi_network_type = "vlan" pi_cidr = "192.168.0.0/27" pi_dns = ["127.0.0.1"] @@ -52,6 +47,6 @@ resource "ibm_pi_network" "power_group_network_mgmt" { # ] # # pi_network_name = "${var.module_var_resource_prefix}-power-group-network-public" -# pi_cloud_instance_id = ibm_resource_instance.power_group.guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list +# pi_cloud_instance_id = var.module_var_ibmcloud_powervs_workspace_guid // Also shown by running either > ibmcloud resource service-instances --long or ibmcloud pi service-list # pi_network_type = "pub-vlan" #} diff --git a/ibmcloud_powervs/account_bootstrap_powervs_workspace/data_network_vpc_target_subnet.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/data_network_vpc_target_subnet.tf new file mode 100644 index 0000000..577fc67 --- /dev/null +++ b/ibmcloud_powervs/account_bootstrap_powervs_workspace/data_network_vpc_target_subnet.tf @@ -0,0 +1,11 @@ + +# Select Subnet on the VPC in a specific Zone +### Also identifies target VPC and target Resource Group + +data "ibm_is_subnet" "vpc_subnet" { + name = var.module_var_ibmcloud_vpc_subnet_name // Requires unique name in account +} + +data "ibm_is_vpc" "vpc" { + name = data.ibm_is_subnet.vpc_subnet.vpc_name // Requires unique name in account +} diff --git a/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_outputs.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_outputs.tf new file mode 100644 index 0000000..f66d2a6 --- /dev/null +++ b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_outputs.tf @@ -0,0 +1,8 @@ + +output "output_power_group_guid" { + value = ibm_resource_instance.power_group.guid +} + +output "output_power_target_vpc_crn" { + value = data.ibm_is_vpc.vpc.crn +} diff --git a/ibmcloud_powervs/account_bootstrap_addon/module_variables.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_variables.tf similarity index 100% rename from ibmcloud_powervs/account_bootstrap_addon/module_variables.tf rename to ibmcloud_powervs/account_bootstrap_powervs_workspace/module_variables.tf diff --git a/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf new file mode 100644 index 0000000..a2fe859 --- /dev/null +++ b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf @@ -0,0 +1,38 @@ +# Terraform declaration + +terraform { + required_version = ">= 1.0" + required_providers { + ibm = { + #source = "localdomain/provider/ibm" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/ibm/1.xx.xx/darwin_amd6 + source = "IBM-Cloud/ibm" // Terraform Registry + version = ">=1.45.0" + } + } +} + + +# Terraform Provider declaration +# +# Nested provider configurations cannot be used with depends_on meta-argument between modules +# +# The calling module block can use either: +# - "providers" argument in the module block +# - none, inherit default (un-aliased) provider configuration +# +# Therefore the below is blank and is only for reference if this module needs to be executed manually + +#provider "ibm" { +# +# 
Define Provider inputs manually +# ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +# +# Define Provider inputs from given Terraform Variables +# ibmcloud_api_key = var.ibmcloud_api_key +# +# If using IBM Cloud Automation Manager, the Provider declaration values are populated automatically +# from the Cloud Connection credentials (by using Environment Variables) +# +# If using IBM Cloud Schematics, the Provider declaration values are populated automatically +# +#} diff --git a/ibmcloud_powervs/account_bootstrap_addon/power_service_instance.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/power_service_instance.tf similarity index 100% rename from ibmcloud_powervs/account_bootstrap_addon/power_service_instance.tf rename to ibmcloud_powervs/account_bootstrap_powervs_workspace/power_service_instance.tf From fe42da51a1415faa177841543a40450e43155bb1 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Mon, 30 Oct 2023 21:01:47 +0000 Subject: [PATCH 11/25] fix: append notes --- .../module_versions.tf | 19 +++++++++++++++++++ .../module_versions.tf | 19 +++++++++++++++++++ .../host_provision/module_versions.tf | 19 +++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf index a2fe859..fd6fbb6 100644 --- a/ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf +++ b/ibmcloud_powervs/account_bootstrap_powervs_networks/module_versions.tf @@ -24,6 +24,8 @@ terraform { #provider "ibm" { # +# alias = "standard" +# # Define Provider inputs manually # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # @@ -35,4 +37,21 @@ terraform { # # If using IBM Cloud Schematics, the Provider declaration values are populated automatically # +# +# region = local.ibmcloud_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# +#} + +#provider "ibm" { +# +# alias = "powervs_secure" +# +# ibmcloud_api_key = var.ibmcloud_api_key +# +# region = local.ibmcloud_powervs_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# #} diff --git a/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf index a2fe859..fd6fbb6 100644 --- a/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf +++ b/ibmcloud_powervs/account_bootstrap_powervs_workspace/module_versions.tf @@ -24,6 +24,8 @@ terraform { #provider "ibm" { # +# alias = "standard" +# # Define Provider inputs manually # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # @@ -35,4 +37,21 @@ terraform { # # If using IBM Cloud Schematics, the Provider declaration values are populated automatically # +# +# region = local.ibmcloud_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# +#} + +#provider "ibm" { +# +# alias = "powervs_secure" +# +# ibmcloud_api_key = var.ibmcloud_api_key +# +# region = local.ibmcloud_powervs_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# #} diff --git a/ibmcloud_powervs/host_provision/module_versions.tf b/ibmcloud_powervs/host_provision/module_versions.tf index a2fe859..fd6fbb6 100644 --- a/ibmcloud_powervs/host_provision/module_versions.tf +++ b/ibmcloud_powervs/host_provision/module_versions.tf @@ -24,6 +24,8 @@ terraform { #provider "ibm" 
{ # +# alias = "standard" +# # Define Provider inputs manually # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" # @@ -35,4 +37,21 @@ terraform { # # If using IBM Cloud Schematics, the Provider declaration values are populated automatically # +# +# region = local.ibmcloud_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# +#} + +#provider "ibm" { +# +# alias = "powervs_secure" +# +# ibmcloud_api_key = var.ibmcloud_api_key +# +# region = local.ibmcloud_powervs_region +# +# zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only +# #} From e67e9ba0d10c352c0d1fac3ae88751e7ec10c5ad Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Thu, 7 Dec 2023 22:21:38 +0000 Subject: [PATCH 12/25] fix: storage def for all platforms --- .../host_provision/build_execution.tf | 6 +- .../host_provision/build_filesystem_setup.tf | 625 ----------------- ibmcloud_powervs/host_provision/host.tf | 24 +- .../host_provision/host_block_storage.tf | 130 +--- .../host_provision/module_variables.tf | 130 +--- ibmcloud_vs/host_provision/build_execution.tf | 2 - .../host_provision/build_filesystem_setup.tf | 589 ---------------- ibmcloud_vs/host_provision/host.tf | 26 +- .../host_provision/host_block_storage.tf | 204 +----- .../host_provision/module_variables.tf | 141 +--- ibmpowervc/host_provision/build_execution.tf | 12 +- .../host_provision/build_filesystem_setup.tf | 633 ------------------ ibmpowervc/host_provision/build_os_prepare.tf | 8 +- .../host_provision/build_os_subscriptions.tf | 8 +- .../build_web_proxy_noninteractive.tf | 8 +- ibmpowervc/host_provision/host.tf | 67 +- ibmpowervc/host_provision/host_storage.tf | 160 +---- ibmpowervc/host_provision/module_variables.tf | 121 +--- ovirt_kvm_vm/.gitkeep | 0 vmware_vm/host_provision/build_execution.tf | 13 +- .../host_provision/build_filesystem_setup.tf | 575 ---------------- vmware_vm/host_provision/host.tf | 11 +- vmware_vm/host_provision/host_storage.tf | 146 +--- vmware_vm/host_provision/module_variables.tf | 119 +--- 24 files changed, 103 insertions(+), 3655 deletions(-) delete mode 100644 ibmcloud_powervs/host_provision/build_filesystem_setup.tf delete mode 100644 ibmcloud_vs/host_provision/build_filesystem_setup.tf delete mode 100644 ibmpowervc/host_provision/build_filesystem_setup.tf delete mode 100644 ovirt_kvm_vm/.gitkeep delete mode 100644 vmware_vm/host_provision/build_filesystem_setup.tf diff --git a/ibmcloud_powervs/host_provision/build_execution.tf b/ibmcloud_powervs/host_provision/build_execution.tf index 38ea590..563893a 100644 --- a/ibmcloud_powervs/host_provision/build_execution.tf +++ b/ibmcloud_powervs/host_provision/build_execution.tf @@ -7,8 +7,7 @@ resource "null_resource" "execute_os_scripts" { null_resource.build_script_os_prepare, null_resource.build_script_web_proxy_noninteractive, null_resource.os_subscription_files, - null_resource.dns_resolv_files, - null_resource.build_script_fs_init + null_resource.dns_resolv_files ] # Execute, including all files provisioned by Terraform into $HOME @@ -23,8 +22,7 @@ resource "null_resource" "execute_os_scripts" { "echo 'Change DNS in resolv.conf'", "if [ -f /tmp/resolv.conf ]; then mv /etc/resolv.conf /etc/resolv.conf.backup && mv /tmp/resolv.conf /etc/ ; fi", "chmod 644 /etc/resolv.conf", - "$HOME/terraform_dig.sh", - "$HOME/terraform_fs_init.sh" + "$HOME/terraform_dig.sh" ] } diff --git a/ibmcloud_powervs/host_provision/build_filesystem_setup.tf 
b/ibmcloud_powervs/host_provision/build_filesystem_setup.tf deleted file mode 100644 index 3d4ab0f..0000000 --- a/ibmcloud_powervs/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,625 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - # Specify the ssh connection - connection { - # The Bastion host ssh connection is established first, and then the host connection will be made from there. - # Checking Host Key is false when not using bastion_host_key - type = "ssh" - user = "root" - host = ibm_pi_instance.host_via_certified_profile.pi_network[0].ip_address - private_key = var.module_var_host_private_ssh_key - bastion_host = var.module_var_bastion_ip - #bastion_host_key = - bastion_port = var.module_var_bastion_ssh_port - bastion_user = var.module_var_bastion_user - bastion_private_key = var.module_var_bastion_private_ssh_key - } - - # Path must already exist and must not use Bash shell special variable, e.g. cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. - # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - - #### - # IBM Power VS other storage notes and commands - #### - # - # IBM Power VS: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 1 (enable) - # this results in LVM pvcreate error: - # Cannot use /dev/sdX: device is a multipath component - # - # IBM Power VS: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 0 (disable) - # this results in LVM pvcreate error: - # Can't open /dev/sdX exclusively. Mounted filesystem? - # Can't open /dev/sdX exclusively. Mounted filesystem? - # - # IBM Power VS: Show multipath devices: - # multipath -l - # - # IBM Power VS: Map Disk Volumes to Device Mapper (DM) Multipath - # multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" - # - # Each Block Storage volume has 1 Multipath DM, which contains 2 path groups each with 4 paths (i.e. 
total of 8 device paths for 1 volume, such as /dev/sdb to /dev/sdi) - - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - - echo "IBM Power VS: Detecting Multipath DM" - multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" | sed "s/^/'/" | sed "s/ dm/',dm/" | sed "s/\(.*\) /\1,/") - multipath_dm_name=$(printf "$multipath_map" | awk -F',' "/$disk_id/ { print \$2 }") - echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for pvcreate" - pvcreate "/dev/$multipath_dm_name" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$multipath_dm_name to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$multipath_dm_name" & echo $lvm_volume_group_target_list) - - #pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - #echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - #lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change - # - # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset - # - # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks - #### - - lvm_volume_group_target_list=$(echo $lvm_volume_group_target_list | tr ' ' '\n' | sort -u) - echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)" - vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n') - echo "" - - ####### - # Create expandable LVM Logical Volume, using single or multiple physical disk volumes - # Default is 64K for the stripe size (aka. 
block size)
-  #
-  # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
-  #
-  # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
-  # IMPORTANT: Correct setting of this stripe size has an impact on OS and Application read/write performance
-  #######
-
-  # Count number of LVM Physical Volumes in the LVM Volume Group
-  count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
-  # Create LVM Logical Volume
-  # Stripe across all LVM Physical Volumes available in the LVM Volume Group
-  echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size extent size (block size)"
-  lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
-  echo ""
-
-
-  #######
-  # Create File System formatting for the LVM Logical Volume
-  # Filesystem is either XFS or EXT4
-  #######
-
-  echo "Create File System formatting for the LVM Logical Volume"
-  mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
-  echo ""
-
-
-  #######
-  # Permanent mount point
-  #######
-
-  # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
-  # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path instead of the “/dev/sdX” path when formatting and mounting file systems.
-
-  # Note: When adding an /etc/fstab entry for iSCSI-based disk devices, use the “_netdev” mount option to ensure
-  # that the network link is ready before the operating system attempts to mount the disk.
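-
-  # Illustrative only (assumed names, not generated by this function): with volume group
-  # "vg_hana_data" and a hypothetical logical volume name "lv_hana_data", the fstab entry
-  # written below would look similar to:
-  #   /dev/vg_hana_data/lv_hana_data  /hana/data  xfs  defaults,noatime  0 0
-  # For an iSCSI-based device, the “_netdev” option noted above would be appended:
-  #   /dev/vg_hana_data/lv_hana_data  /hana/data  xfs  defaults,noatime,_netdev  0 0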
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
- # With only 4 KiB Page Size, only 2 in-memory log buffers are available so increase to each buffer's size (default 32kc) may increase performance - mount_options="defaults,noatime" - #mount_options="defaults,logbsize=256k" - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]] - then - echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB" - - echo "IBM Power VS: Detecting Multipath DM" - multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" | sed "s/^/'/" | sed "s/ dm/',dm/" | sed "s/\(.*\) /\1,/") - multipath_dm_name=$(printf "$multipath_map" | awk -F',' "/$disk_id/ { print \$2 }") - echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for parted and mkfs" - exist_test=$(parted --machine --script /dev/$multipath_dm_name print) - if [[ ! -z $(echo $exist_test | grep $filesystem_format) ]]; then echo 'Already exists, skipping partition creation and formatting...' && continue ; else echo 'Partition Table does not contain physical partition, executing...' ; fi - parted --script /dev/$multipath_dm_name \ - mklabel gpt \ - mkpart primary $filesystem_format 1MiB 100% \ - name 1 $physical_partition_name - echo "Format Disk Partition with File System, with block size $block_size_definition" - partition_id=$(lsblk /dev/$disk_id -o NAME,TYPE --raw | awk "/part/ { print \$1 }" | awk '!/sd/') - echo "Disk Partition ID = $partition_id" - mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/mapper/$partition_id - echo "Write Mount Points to Linux File System Table" - # PhysicalDiskUUID=$(blkid /dev/$multipath_dm_name -s PTUUID -o value) - echo "/dev/mapper/$partition_id $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - - #parted --script /dev/$disk_id \ - # mklabel gpt \ - # mkpart primary $filesystem_format 1MiB 100% \ - # name 1 $physical_partition_name - #echo "Format Disk Partition with File System, with block size $block_size_definition" - #mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id - #echo "Write Mount Points to Linux File System Table" - #PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue) - #echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Swap file or partition -############################################# - -function create_swap_file() { - - echo "Create swapfile" - - swap_gb="$1" - swap_bs="128" - - swap_calc_bs=$swap_bs"M" - swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))" - dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count - chmod 600 /swapfile - mkswap /swapfile - swapon /swapfile - echo '/swapfile swap swap defaults 0 0' >> /etc/fstab - swapon --show - free -h - -} - - -function create_swap_partition() { - - find_swap_partition_by_size="$1" - - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print 
$1,$2/1024/1024/1024; else print $0}') - - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - ###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - echo 'Install jq' - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install jq ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm jq ; fi - #web_proxy_ip_port=$(echo ${var.module_var_web_proxy_url} | awk -F '^http[s]?://' '{print $2}') - #if [ ! -f /usr/local/bin/jq ]; then curl -L --proxy $web_proxy_ip_port 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. 
IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ 
${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/ibmcloud_powervs/host_provision/host.tf b/ibmcloud_powervs/host_provision/host.tf index c6b9958..2a28a50 100644 --- a/ibmcloud_powervs/host_provision/host.tf +++ b/ibmcloud_powervs/host_provision/host.tf @@ -23,7 +23,7 @@ resource "ibm_pi_instance" "host_via_certified_profile" { pi_image_id = ibm_pi_image.host_os_image.image_id pi_storage_type = "tier1" // tier1 required for SAP-certified Profiles - pi_key_pair_name = ibm_pi_key.host_ssh.key_id + pi_key_pair_name = ibm_pi_key.host_ssh.name pi_cloud_instance_id = var.module_var_ibm_power_group_guid pi_pin_policy = "none" pi_health_status = "OK" @@ -32,18 +32,6 @@ resource "ibm_pi_instance" "host_via_certified_profile" { network_id = var.module_var_power_group_networks[0] } - pi_volume_ids = flatten([ - ibm_pi_volume.block_volume_hana_data_tiered.*.volume_id, - ibm_pi_volume.block_volume_hana_log_tiered.*.volume_id, - ibm_pi_volume.block_volume_hana_shared_tiered.*.volume_id, - ibm_pi_volume.block_volume_anydb_tiered.*.volume_id, - ibm_pi_volume.block_volume_usr_sap_tiered.*.volume_id, - ibm_pi_volume.block_volume_sapmnt_tiered.*.volume_id, - ibm_pi_volume.block_volume_swap_tiered.*.volume_id, - ibm_pi_volume.block_volume_software_tiered.*.volume_id, - ]) - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP timeouts { create = "30m" @@ -52,6 +40,16 @@ resource "ibm_pi_instance" "host_via_certified_profile" { } +# Attach block disk volumes to host +resource "ibm_pi_volume_attach" "block_volume_attachment" { + for_each = ibm_pi_volume.block_volume_provision + + pi_cloud_instance_id = var.module_var_ibm_power_group_guid + pi_instance_id = ibm_pi_instance.host_via_certified_profile.id + pi_volume_id = each.value.volume_id +} + + # Create IBM Power Virtual Server, via bespoke size (customisable Memory and CPU) # Designed for SAP NetWeaver AS and SAP AnyDB, cannot use with SAP HANA diff --git a/ibmcloud_powervs/host_provision/host_block_storage.tf b/ibmcloud_powervs/host_provision/host_block_storage.tf index 77995de..83421fe 100644 --- a/ibmcloud_powervs/host_provision/host_block_storage.tf +++ b/ibmcloud_powervs/host_provision/host_block_storage.tf @@ -3,29 +3,23 @@ # # Types = tier1 (10 IOPS/GB), tier3 (3 IOPS/GB). 
See https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-about-virtual-server#storage-tiers -resource "ibm_pi_volume" "block_volume_hana_data_tiered" { - count = var.module_var_disk_volume_count_hana_data +resource "ibm_pi_volume" "block_volume_provision" { +# count = sum([ for storage_item in var.module_var_storage_definition: try(storage_item.disk_count,1) ]) - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-data-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_hana_data - pi_volume_type = var.module_var_disk_volume_type_hana_data - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk } -} - - -resource "ibm_pi_volume" "block_volume_hana_log_tiered" { - count = var.module_var_disk_volume_count_hana_log - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-log-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_hana_log - pi_volume_type = var.module_var_disk_volume_type_hana_log + pi_volume_name = "${var.module_var_virtual_server_hostname}-vol-${each.value.name}" + pi_volume_size = each.value.disk_size + pi_volume_type = each.value.disk_type pi_volume_shareable = false pi_cloud_instance_id = var.module_var_ibm_power_group_guid @@ -34,101 +28,5 @@ resource "ibm_pi_volume" "block_volume_hana_log_tiered" { create = "30m" delete = "30m" } -} - - -resource "ibm_pi_volume" "block_volume_hana_shared_tiered" { - count = var.module_var_disk_volume_count_hana_shared - - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-shared-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_hana_shared - pi_volume_type = var.module_var_disk_volume_type_hana_shared - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "ibm_pi_volume" "block_volume_anydb_tiered" { - count = var.module_var_disk_volume_count_anydb - - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-anydb-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_anydb - pi_volume_type = var.module_var_disk_volume_type_anydb - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - -resource "ibm_pi_volume" "block_volume_usr_sap_tiered" { - count = var.module_var_disk_volume_count_usr_sap - - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-usr-sap-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_usr_sap - pi_volume_type = var.module_var_disk_volume_type_usr_sap - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase 
operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_pi_volume" "block_volume_sapmnt_tiered" { - count = var.module_var_disk_volume_count_sapmnt - - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-sapmnt-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_sapmnt - pi_volume_type = var.module_var_disk_volume_type_sapmnt - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_pi_volume" "block_volume_swap_tiered" { - count = var.module_var_disk_volume_count_swap - - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-swap-${count.index}" - pi_volume_size = var.module_var_disk_volume_capacity_swap - pi_volume_type = var.module_var_disk_volume_type_swap - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_pi_volume" "block_volume_software_tiered" { - pi_volume_name = "${var.module_var_virtual_server_hostname}-volume-hana-software-0" - pi_volume_size = var.module_var_disk_volume_capacity_software - pi_volume_type = var.module_var_disk_volume_type_software - pi_volume_shareable = false - pi_cloud_instance_id = var.module_var_ibm_power_group_guid - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } } diff --git a/ibmcloud_powervs/host_provision/module_variables.tf b/ibmcloud_powervs/host_provision/module_variables.tf index 32cb965..745fc05 100644 --- a/ibmcloud_powervs/host_provision/module_variables.tf +++ b/ibmcloud_powervs/host_provision/module_variables.tf @@ -1,132 +1,4 @@ -variable "module_var_disk_volume_type_hana_data" {} -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_type_hana_log" {} -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_type_hana_shared" {} -variable "module_var_disk_volume_count_hana_shared" {} -variable 
"module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - -variable "module_var_disk_volume_type_anydb" {} -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { - default = false -} -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_type_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_type_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_type_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 525 -} -variable "module_var_disk_volume_type_software" { - default = "tier1" -} - variable "module_var_resource_prefix" {} variable "module_var_resource_tags" {} @@ -184,3 +56,5 @@ variable "module_var_web_proxy_exclusion" {} variable "module_var_os_vendor_account_user" {} variable "module_var_os_vendor_account_user_passcode" {} + +variable "module_var_storage_definition" {} diff --git a/ibmcloud_vs/host_provision/build_execution.tf b/ibmcloud_vs/host_provision/build_execution.tf index a641895..c42e3d8 100644 --- a/ibmcloud_vs/host_provision/build_execution.tf +++ b/ibmcloud_vs/host_provision/build_execution.tf @@ -5,7 +5,6 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ null_resource.dns_resolv_update, - null_resource.build_script_fs_init, null_resource.build_script_os_prepare ] @@ -40,7 +39,6 @@ resource "null_resource" "execute_os_scripts" { "chmod +x $HOME/terraform_*", "echo 'Show HOME directory for reference Shell scripts were transferred'", "ls -lha $HOME", - "$HOME/terraform_fs_init.sh", "$HOME/terraform_os_prep.sh" ] } diff --git a/ibmcloud_vs/host_provision/build_filesystem_setup.tf b/ibmcloud_vs/host_provision/build_filesystem_setup.tf deleted file mode 100644 index ca0fd2c..0000000 --- a/ibmcloud_vs/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,589 +0,0 @@ - -resource 
"null_resource" "build_script_fs_init" { - - # Specify the ssh connection - connection { - # The Bastion host ssh connection is established first, and then the host connection will be made from there. - # Checking Host Key is false when not using bastion_host_key - type = "ssh" - user = "root" - host = ibm_is_instance.virtual_server.primary_network_interface[0].primary_ip[0].address - private_key = var.module_var_host_private_ssh_key - bastion_host = var.module_var_bastion_floating_ip - bastion_port = var.module_var_bastion_ssh_port - bastion_user = var.module_var_bastion_user - bastion_private_key = var.module_var_bastion_private_ssh_key - #bastion_host_key = tls_private_key.bastion_ssh.public_key_openssh - } - - # Path must already exist and must not use Bash shell special variable, e.g. cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. - # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. 
block size), once set this is difficult to change
- #
- # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined
- # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset
- #
- # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks
- ####
-
- echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)"
- vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n')
- echo ""
-
- #######
- # Create expandable LVM Logical Volume, using single or multiple physical disk volumes
- # Default is 64K for the stripe size (aka. block size)
- #
- # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
- #
- # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
- # IMPORTANT: Correct setting of this stripe size has an impact on the performance of OS and Applications read/write
- #######
-
- # Count number of LVM Physical Volumes in the LVM Volume Group
- count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
- # Create LVM Logical Volume
- # Stripe across all LVM Physical Volumes available in the LVM Volume Group
- echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size extent size (block size)"
- lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
- echo ""
-
-
- #######
- # Create File System formatting for the LVM Logical Volume
- # Filesystem is either XFS or EXT4
- #######
-
- echo "Create File System formatting for the LVM Logical Volume"
- mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
- echo ""
-
-
- #######
- # Permanent mount point
- #######
-
- # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
- # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems.
-
- # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure
- # that the network link is ready before the operating system attempts to mount the disk. 
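
Taken together, the sequence above (pvcreate with a data alignment offset, vgcreate with an extent size, lvcreate striped across every Physical Volume, mkfs, then an /etc/fstab entry) reduces to a few commands. A minimal standalone sketch, assuming two hypothetical free disks /dev/sdb and /dev/sdc, the module defaults of 1M data alignment, 4M physical extent size and 64K stripe size, an XFS format and an illustrative mount point /example:

    # Sketch only: device names, VG/LV names and mount point are assumptions
    pvcreate /dev/sdb /dev/sdc --dataalignment 1M
    vgcreate --dataalignment 1M --physicalextentsize 4M vg_example /dev/sdb /dev/sdc
    lvcreate vg_example --yes --extents "100%FREE" --stripesize 64K --stripes 2 --name lv_example
    mkfs.xfs /dev/vg_example/lv_example
    mkdir --parents /example
    echo "/dev/vg_example/lv_example /example xfs defaults,noatime 0 0" >> /etc/fstab
    mount -a

The stripe count matches the number of Physical Volumes in the Volume Group so that reads and writes fan out across both disks.
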
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
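
Because XFS on Linux only mounts when the filesystem block size equals the kernel page size, it can be worth confirming the two match after formatting and before the fstab entry is written. A hedged check, where the device name /dev/sdb1 is illustrative only:

    # Sketch only: compare the kernel page size against the on-disk XFS block size
    page_size=$(getconf PAGESIZE)
    fs_block_size=$(xfs_db -r -c 'sb 0' -c 'p blocksize' /dev/sdb1 | awk '{ print $3 }')
    if [ "$page_size" -eq "$fs_block_size" ]
    then
      echo "XFS block size matches the page size, mount will succeed"
    else
      echo "XFS block size $fs_block_size does not match page size $page_size, mount(2) will fail"
    fi
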
- # With only 4 KiB Page Size, only 2 in-memory log buffers are available so increase to each buffer's size (default 32kc) may increase performance - mount_options="defaults,noatime" - #mount_options="defaults,logbsize=256k" - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]] - then - echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB" - parted --script /dev/$disk_id \ - mklabel gpt \ - mkpart primary $filesystem_format 1MiB 100% \ - name 1 $physical_partition_name - echo "Format Disk Partition with File System, with block size $block_size_definition" - mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id - echo "Write Mount Points to Linux File System Table" - PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue) - echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Swap file or partition -############################################# - -function create_swap_file() { - - echo "Create swapfile" - - swap_gb="$1" - swap_bs="128" - - swap_calc_bs=$swap_bs"M" - swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))" - dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count - chmod 600 /swapfile - mkswap /swapfile - swapon /swapfile - echo '/swapfile swap swap defaults 0 0' >> /etc/fstab - swapon --show - free -h - -} - - -function create_swap_partition() { - - find_swap_partition_by_size="$1" - - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - 
###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - echo 'Install jq' - if [ ! -f /usr/local/bin/jq ]; then curl -L 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. IF with OR operator - #if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - #then - # lvm_install - #fi - - # Always install LVM, permenantly in-use for SAP Software download directory - lvm_install - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 
0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_nfs_boolean_sapmnt} == "false" ]] && [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - elif [[ ${var.module_var_nfs_boolean_sapmnt} == "true" ]] - then - # Establish IBM Cloud File Share mount path (DNS name and ID e.g. fsf-abcdefg.domain.com:/123456) - ibmcloud_file_share_mount_fqdn_sapmnt='${var.module_var_nfs_boolean_sapmnt ? 
var.module_var_nfs_fqdn_sapmnt : "null"}' - # Recommend OS mount of the IBM Cloud File Share via the DNS FQDN, which resolves to the IP Address of the IBM Cloud File Share mount path target - # Usually to increase network latency performance for SAP NetWeaver read/write operations, IP Addresses should be used - # However, the NFS protocol performs DNS lookup at mount time, and stores into the local host DNS cache - based on the DNS TTL defined by the IBM Cloud Private DNS Name Server - # As this activity is infrequent, it should not impact performance to set to the FQDN of the IBM Cloud File Share mount path target - # Install NFS - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install nfs-utils ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm nfs-client ; fi - # Mount IBM Cloud File Share via DNS FQDN - echo "Mounting NFS for /sapmnt to IBM Cloud File Share mount path target DNS Name: $ibmcloud_file_share_mount_fqdn_sapmnt" - #sudo mount -t nfs4 -o sec=sys,nfsvers=4.1 $ibmcloud_file_share_mount_fqdn_sapmnt /sapmnt - echo "# fstab entries for NFS" >> /etc/fstab - echo "$ibmcloud_file_share_mount_fqdn_sapmnt /sapmnt nfs4 nfsvers=4.1,timeo=600,sec=sys,noatime,proto=tcp 0 0" >> /etc/fstab - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - #physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - lvm_filesystem_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "1M" "vg_software" "1M" "4M" "64K" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/ibmcloud_vs/host_provision/host.tf b/ibmcloud_vs/host_provision/host.tf index 261f4bf..cdfe5ff 100644 --- a/ibmcloud_vs/host_provision/host.tf +++ b/ibmcloud_vs/host_provision/host.tf @@ -29,17 +29,6 @@ resource "ibm_is_instance" "virtual_server" { name = "${var.module_var_virtual_server_hostname}-volume-boot-0" } - volumes = flatten([ - var.module_var_disk_volume_type_hana_data == "custom" ? ibm_is_volume.block_volume_hana_data_custom.*.id : ibm_is_volume.block_volume_hana_data_tiered.*.id, - var.module_var_disk_volume_type_hana_log == "custom" ? ibm_is_volume.block_volume_hana_log_custom.*.id : ibm_is_volume.block_volume_hana_log_tiered.*.id, - var.module_var_disk_volume_type_hana_shared == "custom" ? ibm_is_volume.block_volume_hana_shared_custom.*.id : ibm_is_volume.block_volume_hana_shared_tiered.*.id, - var.module_var_disk_volume_type_anydb == "custom" ? 
ibm_is_volume.block_volume_anydb_custom.*.id : ibm_is_volume.block_volume_anydb_tiered.*.id, - ibm_is_volume.block_volume_usr_sap_tiered.*.id, - ibm_is_volume.block_volume_sapmnt_tiered.*.id, - ibm_is_volume.block_volume_swap_tiered.*.id, - ibm_is_volume.block_volume_software_tiered.id - ]) - metadata_service { enabled = true protocol = "https" @@ -53,3 +42,18 @@ resource "ibm_is_instance" "virtual_server" { } } + + +resource "ibm_is_instance_volume_attachment" "block_volume_attachment" { + + for_each = ibm_is_volume.block_volume_provision + + instance = ibm_is_instance.virtual_server.id + + name = each.value.name + volume = each.value.id + + delete_volume_on_attachment_delete = false + delete_volume_on_instance_delete = true + +} diff --git a/ibmcloud_vs/host_provision/host_block_storage.tf b/ibmcloud_vs/host_provision/host_block_storage.tf index 083afc1..aeee573 100644 --- a/ibmcloud_vs/host_provision/host_block_storage.tf +++ b/ibmcloud_vs/host_provision/host_block_storage.tf @@ -4,201 +4,25 @@ # Maximum 4 secondary data volumes per instance attached when creating an instance # Maximum 12 secondary data volumes after instance exists -resource "ibm_is_volume" "block_volume_hana_data_tiered" { - count = var.module_var_disk_volume_type_hana_data != "custom" ? var.module_var_disk_volume_count_hana_data : 0 +resource "ibm_is_volume" "block_volume_provision" { - name = "${var.module_var_virtual_server_hostname}-volume-hana-data-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_data - capacity = var.module_var_disk_volume_capacity_hana_data - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_hana_data_custom" { - count = var.module_var_disk_volume_type_hana_data == "custom" ? var.module_var_disk_volume_count_hana_data : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-hana-data-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_data - capacity = var.module_var_disk_volume_capacity_hana_data - iops = var.module_var_disk_volume_iops_hana_data - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "ibm_is_volume" "block_volume_hana_log_tiered" { - count = var.module_var_disk_volume_type_hana_log != "custom" ? var.module_var_disk_volume_count_hana_log : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-hana-log-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_log - capacity = var.module_var_disk_volume_capacity_hana_log - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_hana_log_custom" { - count = var.module_var_disk_volume_type_hana_log == "custom" ? 
var.module_var_disk_volume_count_hana_log : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-hana-log-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_log - capacity = var.module_var_disk_volume_capacity_hana_log - iops = var.module_var_disk_volume_iops_hana_log - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "ibm_is_volume" "block_volume_hana_shared_tiered" { - count = var.module_var_disk_volume_type_hana_shared != "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-hana-shared-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_shared - capacity = var.module_var_disk_volume_capacity_hana_shared - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk } -} - -resource "ibm_is_volume" "block_volume_hana_shared_custom" { - count = var.module_var_disk_volume_type_hana_shared == "custom" ? var.module_var_disk_volume_count_hana_shared : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-hana-shared-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_hana_shared - capacity = var.module_var_disk_volume_capacity_hana_shared - iops = var.module_var_disk_volume_iops_hana_shared - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "ibm_is_volume" "block_volume_anydb_tiered" { - count = var.module_var_disk_volume_type_anydb != "custom" ? var.module_var_disk_volume_count_anydb : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-anydb-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_anydb - capacity = var.module_var_disk_volume_capacity_anydb - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_anydb_custom" { - count = var.module_var_disk_volume_type_anydb == "custom" ? 
var.module_var_disk_volume_count_anydb : 0 - - name = "${var.module_var_virtual_server_hostname}-volume-anydb-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_anydb - capacity = var.module_var_disk_volume_capacity_anydb - iops = var.module_var_disk_volume_iops_anydb - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - - - -resource "ibm_is_volume" "block_volume_usr_sap_tiered" { - count = var.module_var_disk_volume_count_usr_sap - - name = "${var.module_var_virtual_server_hostname}-volume-usr-sap-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_usr_sap - capacity = var.module_var_disk_volume_capacity_usr_sap - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_sapmnt_tiered" { - count = var.module_var_nfs_boolean_sapmnt ? 0 : var.module_var_disk_volume_count_sapmnt - - name = "${var.module_var_virtual_server_hostname}-volume-sapmnt-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_sapmnt - capacity = var.module_var_disk_volume_capacity_sapmnt - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_swap_tiered" { - count = var.module_var_disk_volume_count_swap - - name = "${var.module_var_virtual_server_hostname}-volume-swap-${count.index}" - resource_group = var.module_var_resource_group_id - zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_swap - capacity = var.module_var_disk_volume_capacity_swap - - # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP - timeouts { - create = "30m" - delete = "30m" - } -} - -resource "ibm_is_volume" "block_volume_software_tiered" { - name = "${var.module_var_virtual_server_hostname}-volume-software-0" + name = "${var.module_var_virtual_server_hostname}-vol-${each.value.name}" resource_group = var.module_var_resource_group_id zone = local.target_vpc_availability_zone - profile = var.module_var_disk_volume_type_software - capacity = var.module_var_disk_volume_capacity_software + profile = each.value.disk_type + capacity = each.value.disk_size + iops = each.value.disk_iops # Increase operation timeout for Compute and Storage, default to 30m in all Terraform Modules for SAP timeouts { diff --git a/ibmcloud_vs/host_provision/module_variables.tf b/ibmcloud_vs/host_provision/module_variables.tf index 11c24e8..a4bada9 100644 --- a/ibmcloud_vs/host_provision/module_variables.tf +++ b/ibmcloud_vs/host_provision/module_variables.tf @@ -42,147 +42,10 @@ variable "module_var_bastion_user" {} variable "module_var_virtual_server_profile" {} -variable "module_var_disk_volume_type_hana_data" {} -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_disk_volume_iops_hana_data" { - default = null -} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { 
- default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_type_hana_log" {} -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_disk_volume_iops_hana_log" { - default = null -} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_type_hana_shared" {} -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_disk_volume_iops_hana_shared" { - default = null -} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - -variable "module_var_disk_volume_type_anydb" {} -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { - default = false -} -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - variable "module_var_host_os_image" {} -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_type_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_type_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} -variable "module_var_nfs_boolean_sapmnt" {} -variable "module_var_nfs_fqdn_sapmnt" {} - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_type_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - -variable 
"module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 304 -} -variable "module_var_disk_volume_type_software" { - default = "5iops-tier" -} - variable "module_var_disable_ip_anti_spoofing" { default = false } + +variable "module_var_storage_definition" {} diff --git a/ibmpowervc/host_provision/build_execution.tf b/ibmpowervc/host_provision/build_execution.tf index 69496b1..4668bdd 100644 --- a/ibmpowervc/host_provision/build_execution.tf +++ b/ibmpowervc/host_provision/build_execution.tf @@ -4,16 +4,9 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ - null_resource.build_script_fs_init, null_resource.build_script_os_prepare, null_resource.os_subscription_files, - openstack_compute_volume_attach_v2.volume_attachment_hana_data, - openstack_compute_volume_attach_v2.volume_attachment_hana_log, - openstack_compute_volume_attach_v2.volume_attachment_hana_shared, - openstack_compute_volume_attach_v2.volume_attachment_usr_sap, - openstack_compute_volume_attach_v2.volume_attachment_sapmnt, - openstack_compute_volume_attach_v2.volume_attachment_swap, - openstack_compute_volume_attach_v2.volume_attachment_software + openstack_compute_volume_attach_v2.block_volume_attachment ] connection { @@ -36,8 +29,7 @@ resource "null_resource" "execute_os_scripts" { "chmod +x $HOME/terraform_*", "$HOME/terraform_os_prep.sh", "$HOME/terraform_web_proxy_noninteractive.sh", - "$HOME/terraform_os_subscriptions.sh", - "$HOME/terraform_fs_init.sh" + "$HOME/terraform_os_subscriptions.sh" ] } diff --git a/ibmpowervc/host_provision/build_filesystem_setup.tf b/ibmpowervc/host_provision/build_filesystem_setup.tf deleted file mode 100644 index 8e60f00..0000000 --- a/ibmpowervc/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,633 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - depends_on = [ - openstack_compute_volume_attach_v2.volume_attachment_hana_data, - openstack_compute_volume_attach_v2.volume_attachment_hana_log, - openstack_compute_volume_attach_v2.volume_attachment_hana_shared, - openstack_compute_volume_attach_v2.volume_attachment_usr_sap, - openstack_compute_volume_attach_v2.volume_attachment_sapmnt, - openstack_compute_volume_attach_v2.volume_attachment_swap, - openstack_compute_volume_attach_v2.volume_attachment_software - ] - - # Specify the ssh connection - connection { - type = "ssh" - user = "root" - host = openstack_compute_instance_v2.host_provision.access_ip_v4 - private_key = var.module_var_host_private_ssh_key - } - - # Path must already exist and must not use Bash shell special variable, e.g. 
cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. - # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - - #### - # IBM Power LPAR storage notes and commands - #### - # - # IBM Power LPAR storage: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 1 (enable) - # this results in LVM pvcreate error: - # Cannot use /dev/sdX: device is a multipath component - # - # IBM Power LPAR storage: IF multipath_component_detection in /etc/lvm/lvm.conf is set to = 0 (disable) - # this results in LVM pvcreate error: - # Can't open /dev/sdX exclusively. Mounted filesystem? - # Can't open /dev/sdX exclusively. Mounted filesystem? - # - # IBM Power LPAR storage: Show multipath devices: - # multipath -l - # - # IBM Power LPAR storage: Map Disk Volumes to Device Mapper (DM) Multipath - # multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" - # - # Each Block Storage volume has 1 Multipath DM, which contains 2 path groups each with 4 paths (i.e. 
total of 8 device paths for 1 volume, such as /dev/sdb to /dev/sdi) - - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - - echo "IBM Power LPAR: Detecting Multipath DM" - multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" | sed "s/^/'/" | sed "s/ dm/',dm/" | sed "s/\(.*\) /\1,/") - multipath_dm_name=$(printf "$multipath_map" | awk -F',' "/$disk_id/ { print \$2 }") - echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for pvcreate" - pvcreate "/dev/$multipath_dm_name" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$multipath_dm_name to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$multipath_dm_name" & echo $lvm_volume_group_target_list) - - #pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - #echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - #lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change - # - # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset - # - # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks - #### - - lvm_volume_group_target_list=$(echo $lvm_volume_group_target_list | tr ' ' '\n' | sort -u) - echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)" - vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n') - echo "" - - ####### - # Create expandable LVM Logical Volume, using single or multiple physical disk volumes - # Default is 64K for the stripe size (aka. 
block size)
-    #
-    # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
-    #
-    # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
-    # IMPORTANT: Correct setting of this stripe size has an impact on OS and Application read/write performance
-    #######
-
-    # Count number of LVM Physical Volumes in the LVM Volume Group
-    count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
-    # Create LVM Logical Volume
-    # Stripe across all LVM Physical Volumes available in the LVM Volume Group
-    echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size stripe size (block size)"
-    lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Create File System formatting for the LVM Logical Volume
-    # Filesystem is either XFS or EXT4
-    #######
-
-    echo "Create File System formatting for the LVM Logical Volume"
-    mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Permanent mount point
-    #######
-
-    # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
-    # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems.
-
-    # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure
-    # that the network link is ready before the operating system attempts to mount the disk.
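For illustration only (not part of this patch), a minimal sketch of the kind of fstab entry the step below appends; the volume group, logical volume, and mount point names here are hypothetical, and the _netdev variant applies the iSCSI note above:

# Hypothetical names: volume group vg_hana_data, logical volume lv_hana_data, mount point /hana/data
echo "/dev/vg_hana_data/lv_hana_data /hana/data xfs defaults,noatime 0 0" >> /etc/fstab
# For an iSCSI-backed device, the options field would instead read: defaults,noatime,_netdev
mount -a   # confirm the new entry mounts cleanly before any reboot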
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
- # With only 4 KiB Page Size, only 2 in-memory log buffers are available so increase to each buffer's size (default 32kc) may increase performance - mount_options="defaults,noatime" - #mount_options="defaults,logbsize=256k" - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]] - then - echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB" - - echo "IBM Power LPAR: Detecting Multipath DM" - multipath_map=$(multipathd show maps json | jq --raw-output '.maps[] | [.path_groups[].paths[].dev, .sysfs, .uuid] | @sh' | tr -d \'\" | sed "s/^/'/" | sed "s/ dm/',dm/" | sed "s/\(.*\) /\1,/") - multipath_dm_name=$(printf "$multipath_map" | awk -F',' "/$disk_id/ { print \$2 }") - echo "/dev/$disk_id is multipath /dev/$multipath_dm_name, using this for parted and mkfs" - exist_test=$(parted --machine --script /dev/$multipath_dm_name print) - if [[ ! -z $(echo $exist_test | grep $filesystem_format) ]]; then echo 'Already exists, skipping partition creation and formatting...' && continue ; else echo 'Partition Table does not contain physical partition, executing...' ; fi - parted --script /dev/$multipath_dm_name \ - mklabel gpt \ - mkpart primary $filesystem_format 1MiB 100% \ - name 1 $physical_partition_name - echo "Format Disk Partition with File System, with block size $block_size_definition" - partition_id=$(lsblk /dev/$disk_id -o NAME,TYPE --raw | awk "/part/ { print \$1 }" | awk '!/sd/') - echo "Disk Partition ID = $partition_id" - mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/mapper/$partition_id - echo "Write Mount Points to Linux File System Table" - # PhysicalDiskUUID=$(blkid /dev/$multipath_dm_name -s PTUUID -o value) - echo "/dev/mapper/$partition_id $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - - #parted --script /dev/$disk_id \ - # mklabel gpt \ - # mkpart primary $filesystem_format 1MiB 100% \ - # name 1 $physical_partition_name - #echo "Format Disk Partition with File System, with block size $block_size_definition" - #mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id - #echo "Write Mount Points to Linux File System Table" - #PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue) - #echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Swap file or partition -############################################# - -function create_swap_file() { - - echo "Create swapfile" - - swap_gb="$1" - swap_bs="128" - - swap_calc_bs=$swap_bs"M" - swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))" - dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count - chmod 600 /swapfile - mkswap /swapfile - swapon /swapfile - echo '/swapfile swap swap defaults 0 0' >> /etc/fstab - swapon --show - free -h - -} - - -function create_swap_partition() { - - find_swap_partition_by_size="$1" - - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print 
$1,$2/1024/1024/1024; else print $0}') - - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - ###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - printf "\n----------------\n\n" - echo '--- Rescan SCSI bus for new SCSI/iSCSI devices ---' - /usr/bin/rescan-scsi-bus.sh - printf "\n----------------\n\n" - - echo 'Install jq' - if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install jq ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm jq ; fi - #web_proxy_ip_port=$(echo ${var.module_var_web_proxy_url} | awk -F '^http[s]?://' '{print $2}') - #if [ ! -f /usr/local/bin/jq ]; then curl -L --proxy $web_proxy_ip_port 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. 
IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ 
${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/ibmpowervc/host_provision/build_os_prepare.tf b/ibmpowervc/host_provision/build_os_prepare.tf index 5f9def3..f8a929e 100644 --- a/ibmpowervc/host_provision/build_os_prepare.tf +++ b/ibmpowervc/host_provision/build_os_prepare.tf @@ -2,13 +2,7 @@ resource "null_resource" "build_script_os_prepare" { depends_on = [ - openstack_compute_volume_attach_v2.volume_attachment_hana_data, - openstack_compute_volume_attach_v2.volume_attachment_hana_log, - openstack_compute_volume_attach_v2.volume_attachment_hana_shared, - openstack_compute_volume_attach_v2.volume_attachment_usr_sap, - openstack_compute_volume_attach_v2.volume_attachment_sapmnt, - openstack_compute_volume_attach_v2.volume_attachment_swap, - openstack_compute_volume_attach_v2.volume_attachment_software + openstack_compute_volume_attach_v2.block_volume_attachment ] # Specify the ssh connection diff --git a/ibmpowervc/host_provision/build_os_subscriptions.tf b/ibmpowervc/host_provision/build_os_subscriptions.tf index 56206af..4fa091e 100644 --- a/ibmpowervc/host_provision/build_os_subscriptions.tf +++ b/ibmpowervc/host_provision/build_os_subscriptions.tf @@ -4,13 +4,7 @@ resource "null_resource" "os_subscription_files" { depends_on = [ - openstack_compute_volume_attach_v2.volume_attachment_hana_data, - openstack_compute_volume_attach_v2.volume_attachment_hana_log, - openstack_compute_volume_attach_v2.volume_attachment_hana_shared, - openstack_compute_volume_attach_v2.volume_attachment_usr_sap, - openstack_compute_volume_attach_v2.volume_attachment_sapmnt, - openstack_compute_volume_attach_v2.volume_attachment_swap, - openstack_compute_volume_attach_v2.volume_attachment_software + openstack_compute_volume_attach_v2.block_volume_attachment ] connection { diff --git a/ibmpowervc/host_provision/build_web_proxy_noninteractive.tf b/ibmpowervc/host_provision/build_web_proxy_noninteractive.tf index 27a4028..e3073b5 100644 --- a/ibmpowervc/host_provision/build_web_proxy_noninteractive.tf +++ b/ibmpowervc/host_provision/build_web_proxy_noninteractive.tf @@ -2,13 +2,7 @@ resource "null_resource" "build_script_web_proxy_noninteractive" { depends_on = [ - openstack_compute_volume_attach_v2.volume_attachment_hana_data, - openstack_compute_volume_attach_v2.volume_attachment_hana_log, - openstack_compute_volume_attach_v2.volume_attachment_hana_shared, - openstack_compute_volume_attach_v2.volume_attachment_usr_sap, - openstack_compute_volume_attach_v2.volume_attachment_sapmnt, - openstack_compute_volume_attach_v2.volume_attachment_swap, - openstack_compute_volume_attach_v2.volume_attachment_software + openstack_compute_volume_attach_v2.block_volume_attachment 
   ]

   # Specify the ssh connection
diff --git a/ibmpowervc/host_provision/host.tf b/ibmpowervc/host_provision/host.tf
index b723cc7..15028c3 100644
--- a/ibmpowervc/host_provision/host.tf
+++ b/ibmpowervc/host_provision/host.tf
@@ -39,71 +39,10 @@ resource "openstack_compute_instance_v2" "host_provision" {

 ### Attach Data Volumes to the host

-resource "openstack_compute_volume_attach_v2" "volume_attachment_hana_data" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_hana_data.*.id)
+resource "openstack_compute_volume_attach_v2" "block_volume_attachment" {
+  for_each = openstack_blockstorage_volume_v2.block_volume_provision

   instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_hana_data[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_hana_log" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_hana_log.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_hana_log[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_hana_shared" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_hana_shared.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_hana_shared[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_anydb" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_anydb.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_anydb[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_usr_sap" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_usr_sap.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_usr_sap[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_sapmnt" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_sapmnt.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_sapmnt[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_swap" {
-  count = length(openstack_blockstorage_volume_v2.block_volume_swap.*.id)
-
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_swap[count.index].id
-  #multiattach = true
-}
-
-
-resource "openstack_compute_volume_attach_v2" "volume_attachment_software" {
-  instance_id = openstack_compute_instance_v2.host_provision.id
-  volume_id   = openstack_blockstorage_volume_v2.block_volume_software.id
+  volume_id   = each.value.id
   #multiattach = true
 }
diff --git a/ibmpowervc/host_provision/host_storage.tf b/ibmpowervc/host_provision/host_storage.tf
index b924804..4ebdff7 100644
--- a/ibmpowervc/host_provision/host_storage.tf
+++ b/ibmpowervc/host_provision/host_storage.tf
@@ -2,150 +2,22 @@
 # Create IBM PowerVC Data Volumes
 # https://www.ibm.com/docs/en/powervc/1.4.3?topic=apis-supported-volume-type-extra-specs

-resource "openstack_blockstorage_volume_v2" "block_volume_hana_data" {
-  count                =
var.module_var_disk_volume_count_hana_data - - name = "${var.module_var_lpar_hostname}-volume-hana-data-${count.index}" - size = var.module_var_disk_volume_capacity_hana_data - volume_type = local.ibm_storwize_storage_template_sap_hana_fast_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_hana_log" { - count = var.module_var_disk_volume_count_hana_log - - name = "${var.module_var_lpar_hostname}-volume-hana-log-${count.index}" - size = var.module_var_disk_volume_capacity_hana_log - volume_type = local.ibm_storwize_storage_template_sap_hana_fast_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_hana_shared" { - count = var.module_var_disk_volume_count_hana_shared - - name = "${var.module_var_lpar_hostname}-volume-hana-shared-${count.index}" - size = var.module_var_disk_volume_capacity_hana_shared - volume_type = local.ibm_storwize_storage_template_sap_other_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_anydb" { - count = var.module_var_disk_volume_count_anydb - - name = "${var.module_var_lpar_hostname}-volume-anydb-${count.index}" - size = var.module_var_disk_volume_capacity_anydb - volume_type = local.ibm_storwize_storage_template_sap_other_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_usr_sap" { - count = var.module_var_disk_volume_count_usr_sap - - name = "${var.module_var_lpar_hostname}-volume-usr-sap-${count.index}" - size = var.module_var_disk_volume_capacity_usr_sap - volume_type = local.ibm_storwize_storage_template_sap_other_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_sapmnt" { - count = var.module_var_disk_volume_count_sapmnt - - name = "${var.module_var_lpar_hostname}-volume-sapmnt-${count.index}" - size = var.module_var_disk_volume_capacity_sapmnt - volume_type = local.ibm_storwize_storage_template_sap_other_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_swap" { - count = var.module_var_disk_volume_count_swap - - name = "${var.module_var_lpar_hostname}-volume-swap-${count.index}" - size = var.module_var_disk_volume_capacity_swap - 
volume_type = local.ibm_storwize_storage_template_sap_other_name - #multiattach = true - - scheduler_hints { - - # After provisioning, modifications to extra_specs parameters may not be identified during Terraform refresh and re-apply - additional_properties = { - "drivers:multipath" : "0" - } - - } - -} - - -resource "openstack_blockstorage_volume_v2" "block_volume_software" { - name = "${var.module_var_lpar_hostname}-volume-software-0" - size = var.module_var_disk_volume_capacity_software - volume_type = local.ibm_storwize_storage_template_sap_other_name +resource "openstack_blockstorage_volume_v2" "block_volume_provision" { + + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk + } + + name = "${var.module_var_lpar_hostname}-vol-${each.value.name}" + size = each.value.disk_size + volume_type = each.value.disk_type #multiattach = true scheduler_hints { diff --git a/ibmpowervc/host_provision/module_variables.tf b/ibmpowervc/host_provision/module_variables.tf index 9776bd5..4ff0ade 100644 --- a/ibmpowervc/host_provision/module_variables.tf +++ b/ibmpowervc/host_provision/module_variables.tf @@ -52,125 +52,6 @@ variable "module_var_lpar_hostname" { variable "module_var_dns_root_domain_name" { } -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - - -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable 
"module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { - default = false -} -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { - default = 525 -} - variable "module_var_web_proxy_url" {} variable "module_var_web_proxy_exclusion" {} @@ -179,3 +60,5 @@ variable "module_var_os_vendor_account_user_passcode" {} variable "module_var_os_systems_mgmt_host" { default = "" } + +variable "module_var_storage_definition" {} diff --git a/ovirt_kvm_vm/.gitkeep b/ovirt_kvm_vm/.gitkeep deleted file mode 100644 index e69de29..0000000 diff --git a/vmware_vm/host_provision/build_execution.tf b/vmware_vm/host_provision/build_execution.tf index d45c74e..e5ce3cd 100644 --- a/vmware_vm/host_provision/build_execution.tf +++ b/vmware_vm/host_provision/build_execution.tf @@ -4,15 +4,7 @@ resource "null_resource" "execute_os_scripts" { depends_on = [ - vsphere_virtual_disk.virtual_disk_hana_data, - vsphere_virtual_disk.virtual_disk_hana_log, - vsphere_virtual_disk.virtual_disk_hana_shared, - vsphere_virtual_disk.virtual_disk_anydb, - vsphere_virtual_disk.virtual_disk_usr_sap, - vsphere_virtual_disk.virtual_disk_sapmnt, - vsphere_virtual_disk.virtual_disk_swap, - vsphere_virtual_disk.virtual_disk_software, - null_resource.build_script_fs_init, + vsphere_virtual_disk.virtual_disk_provision, null_resource.build_script_os_prepare, null_resource.os_subscription_files, vsphere_virtual_machine.host_provision @@ -39,8 +31,7 @@ resource "null_resource" "execute_os_scripts" { "chmod +x $HOME/terraform_*", "$HOME/terraform_os_prep.sh", "$HOME/terraform_web_proxy_noninteractive.sh", - "$HOME/terraform_os_subscriptions.sh", - "$HOME/terraform_fs_init.sh" + "$HOME/terraform_os_subscriptions.sh" ] } diff --git a/vmware_vm/host_provision/build_filesystem_setup.tf b/vmware_vm/host_provision/build_filesystem_setup.tf deleted file mode 100644 index f1d433d..0000000 --- a/vmware_vm/host_provision/build_filesystem_setup.tf +++ /dev/null @@ -1,575 +0,0 @@ - -resource "null_resource" "build_script_fs_init" { - - depends_on = [ - vsphere_virtual_machine.host_provision - ] - - # Specify the ssh connection - connection { - type = "ssh" - user = "root" - host = vsphere_virtual_machine.host_provision.default_ip_address - private_key = var.module_var_host_private_ssh_key - timeout = "30s" - } - - # Path must already exist and must not use Bash shell special variable, 
e.g. cannot use $HOME/file.sh - # "By default, OpenSSH's scp implementation runs in the remote user's home directory and so you can specify a relative path to upload into that home directory" - # https://www.terraform.io/language/resources/provisioners/file#destination-paths - provisioner "file" { - destination = "terraform_fs_init.sh" - content = <1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - #### - # Create LVM Physical Volumes - # - # This initialises the whole Disk or a Disk Partition as LVM Physical Volumes for use as part of LVM Logical Volumes - # - # First physical extent begins at 1MB which is defined by default_data_alignment in lvm.conf and this can be overriden by --dataalignment. - # Default 1MB offset from disk start before first LVM PV Physical Extent is used, - # and an additional offset after can be set using --dataalignmentoffset. - # - # I/O from the LVM Volume Group to the LVM Physical Volume will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Physical Volume data alignment offset - #### - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = "$disk_capacity_gb_specified" ]] - then - echo "Creating LVM Physical Volume for /dev/$disk_id using data alignment offset $lvm_pv_data_alignment" - pvcreate "/dev/$disk_id" --dataalignment $lvm_pv_data_alignment - echo "Adding /dev/$disk_id to a list for the LVM Volume Group for $mount_point" - lvm_volume_group_target_list=$(echo "/dev/$disk_id" & echo $lvm_volume_group_target_list) - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - - #### - # Create LVM Volume Groups and add LVM Physical Volumes - # Default is 1MiB offset from disk start before first LVM VG Physical Extent is used - # Default is 4MiB for the physical extent size (aka. block size), once set this is difficult to change - # - # I/O from the LVM Logical Volume to the LVM Volume Group will use the extent size defined - # by the LVM Volume Group, starting at the point defined by the LVM Volume Group data alignment offset - # - # Therefore the LVM Volume Group extent size acts as the block size from LVM virtualization to the physical disks - #### - - echo "Creating $lvm_volume_group_name volume group with $(echo $lvm_volume_group_target_list | tr -d '\n'), using $lvm_volume_group_data_alignment data alignment and $lvm_volume_group_physical_extent_size extent size (block size)" - vgcreate --dataalignment $lvm_volume_group_data_alignment --physicalextentsize $lvm_volume_group_physical_extent_size $lvm_volume_group_name $(echo $lvm_volume_group_target_list | tr -d '\n') - echo "" - - ####### - # Create expandable LVM Logical Volume, using single or multiple physical disk volumes - # Default is 64K for the stripe size (aka. 
block size)
-    #
-    # I/O from the OS/Applications to the LVM Logical Volume will use the stripe size defined
-    #
-    # Therefore the LVM Logical Volume stripe size acts as the block size from OS to LVM virtualization
-    # IMPORTANT: Correct setting of this stripe size has an impact on OS and Application read/write performance
-    #######
-
-    # Count number of LVM Physical Volumes in the LVM Volume Group
-    count_physical_volumes=$(echo "$lvm_volume_group_target_list" | wc -w)
-
-    # Create LVM Logical Volume
-    # Stripe across all LVM Physical Volumes available in the LVM Volume Group
-    echo "Creating $lvm_logical_volume_name logical volume for $lvm_volume_group_name volume group, using $lvm_logical_volume_stripe_size stripe size (block size)"
-    lvcreate $lvm_volume_group_name --yes --extents "100%FREE" --stripesize $lvm_logical_volume_stripe_size --stripes $count_physical_volumes --name "$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Create File System formatting for the LVM Logical Volume
-    # Filesystem is either XFS or EXT4
-    #######
-
-    echo "Create File System formatting for the LVM Logical Volume"
-    mkfs.$filesystem_format "/dev/$lvm_volume_group_name/$lvm_logical_volume_name"
-    echo ""
-
-
-    #######
-    # Permanent mount point
-    #######
-
-    # Note: After enabling multipath on the Linux host and rebooting the system, disk paths might appear in “/dev/UUID” form with a unique alphanumeric identifier.
-    # This can be seen by using the “lsblk” command on Linux. The preferred method is to use this disk path as opposed to the “/dev/sdX” path when formatting and mounting file systems.
-
-    # Note: When adding an /etc/fstab entry for iSCSI based disk devices, use the “_netdev” mount option to ensure
-    # that the network link is ready before the operating system attempts to mount the disk.
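As a hedged sketch only (not part of this patch), the UUID-based mounting recommended in the note above could look as follows; the device name and mount point are hypothetical:

# Hypothetical device /dev/sdb already formatted as XFS, to be mounted at /usr/sap
disk_uuid=$(blkid /dev/sdb -s UUID -o value)
echo "UUID=$disk_uuid /usr/sap xfs defaults,noatime 0 0" >> /etc/fstab
findmnt --verify   # sanity-check all fstab entries before rebooting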
- - echo "Create fstab entries for $lvm_volume_group_name" - echo "# fstab entries for $lvm_volume_group_name" >> /etc/fstab - echo "/dev/$lvm_volume_group_name/$lvm_logical_volume_name $mount_point $filesystem_format defaults,noatime 0 0" >> /etc/fstab - echo "" - -} - - - - -############################################# -# Physical Volume Partition formatting -############################################# - -function physical_volume_partition_runner() { - - mount_point="$1" - disk_capacity_gb_specified="$2" - physical_partition_filesystem_block_size="$3" - physical_partition_name="$4" - filesystem_format="$5" - - # Ensure directory is available - mkdir --parents $mount_point - - # Clear any previous data entries on previously formatted disks - unset existing_disks_list - unset lvm_volume_group_target_list - unset physical_disks_list_with_gigabytes - - # Find existing disk devices and partitions - for disk in $(blkid -o device) - do - existing_disk_no_partition=$(echo "$disk" | sed 's/[0-9]\+$//') - export existing_disks_list=$(echo $existing_disk_no_partition & echo $existing_disks_list) - unset existing_disk_no_partition - done - - # Run calculations - physical_disks_list=$(lsblk --nodeps --bytes --noheadings -io KNAME,FSTYPE | awk 'BEGIN{OFS="\t"} {if (FNR>1 && $2 = "") print "/dev/"$1; else print $0}') - physical_disks_list_with_megabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024; else print $0}') - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - echo "$physical_disks_list_with_gigabytes" > $HOME/physical_disks_list_with_gigabytes.txt - - - if [[ $filesystem_format == "xfs" ]] - then - echo "#### XFS on Linux supports only filesystems with block sizes EQUAL to the system page size. ####" - echo "#### The disk can be formatted with up to 64 KiB, however it will fail to mount with the following error ####" - echo "# mount(2) system call failed: Function not implemented." - echo "" - echo "#### The default page size is hardcoded and cannot be changed. ####" - echo "" - echo "#### Red Hat KB: What is the maximum supported XFS block size in RHEL? - https://access.redhat.com/solutions/1614393 ####" - echo "#### Red Hat KB: Is it possible to change Page Size in Red Hat Enterprise Linux? - https://access.redhat.com/solutions/4854441 ####" - echo "" - echo "Page Size currently set to:" - getconf PAGESIZE - echo "" - fi - - page_size=$(getconf PAGESIZE) - - if [[ $filesystem_format == "xfs" ]] && [[ $(( page_size/1024 )) != $(echo $physical_partition_filesystem_block_size | sed 's/[^0-9]*//g') ]] - then - echo "Requested XFS Block Sizes are not equal to the Page Size, amend to Page Size" - echo "$mount_point requested as xfs with block size $physical_partition_filesystem_block_size, resetting to $page_size" - block_size_definition=$page_size - else - block_size_definition=$physical_partition_filesystem_block_size - fi - - - # Mount options for filesystem table. 
- # With only 4 KiB Page Size, only 2 in-memory log buffers are available so increase to each buffer's size (default 32kc) may increase performance - mount_options="defaults,noatime" - #mount_options="defaults,logbsize=256k" - - # Workaround to while running in subshell and inability to re-use variables (the volume group target lists) - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $disk_capacity_gb_specified ]] - then - echo "Creating Whole Disk Physical Volume Partition and File System for /dev/$disk_id at $mount_point with GPT Partition Table, start at 1MiB" - parted --script /dev/$disk_id \ - mklabel gpt \ - mkpart primary $filesystem_format 1MiB 100% \ - name 1 $physical_partition_name - echo "Format Disk Partition with File System, with block size $block_size_definition" - mkfs.$${filesystem_format} -f -b size=$block_size_definition /dev/$disk_id - echo "Write Mount Points to Linux File System Table" - PhysicalDiskUUID=$(blkid /dev/$disk_id -sUUID -ovalue) - echo "UUID=$PhysicalDiskUUID $mount_point $${filesystem_format} $mount_options 0 0"\ >> /etc/fstab - echo "" - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Swap file or partition -############################################# - -function create_swap_file() { - - echo "Create swapfile" - - swap_gb="$1" - swap_bs="128" - - swap_calc_bs=$swap_bs"M" - swap_calc_count="$((x=$swap_gb*1024,x/$swap_bs))" - dd if=/dev/zero of=/swapfile bs=$swap_calc_bs count=$swap_calc_count - chmod 600 /swapfile - mkswap /swapfile - swapon /swapfile - echo '/swapfile swap swap defaults 0 0' >> /etc/fstab - swapon --show - free -h - -} - - -function create_swap_partition() { - - find_swap_partition_by_size="$1" - - physical_disks_list_with_gigabytes=$(lsblk --nodeps --bytes --noheadings -io KNAME,SIZE | awk 'BEGIN{OFS="\t"} {if (FNR>1) print $1,$2/1024/1024/1024; else print $0}') - - while IFS= read -r line - do - disk_id=$(echo $line | awk '{ print $1}') - disk_capacity_gb=$(echo $line | awk '{ print $2}') - if [[ $existing_disks_list = *"$disk_id"* ]] - then - echo "No action on existing formatted /dev/$disk_id" - elif [[ $disk_capacity_gb = $find_swap_partition_by_size ]] - then - echo "Create swap partition" - mkswap /dev/$disk_id - swapon /dev/$disk_id - echo "/dev/$disk_id swap swap defaults 0 0" >> /etc/fstab - swapon --show - free -h - echo "" - break - fi - done <<< "$(echo -e "$physical_disks_list_with_gigabytes")" - -} - - - - -############################################# -# Verify/Debug -############################################# - -storage_debug="false" - -function storage_debug_run() { - -if [ "$storage_debug" == "true" ] -then - - echo "--- Show Mount points ---" - df -h - printf "\n----------------\n\n" - - echo "--- Show /etc/fstab file ---" - cat /etc/fstab - printf "\n----------------\n\n" - - echo "--- Show Block devices ---" - blkid - printf "\n----------------\n\n" - - echo "--- Show Block devices information ---" - lsblk -o NAME,MAJ:MIN,RM,SIZE,RO,TYPE,MOUNTPOINT,PHY-SEC,LOG-SEC - printf "\n----------------\n\n" - - echo "--- Show Hardware List of Disks and Volumes ---" - lshw -class disk -class volume - ###lshw -json -class disk -class volume | jq '[.logicalname, .configuration.sectorsize, .configuration.logicalsectorsize]' - 
###tail -n +1 /sys/block/vd*/queue/*_block_size - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes ---" - pvs - # pvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Physical Volumes information ---" - pvdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups ---" - vgs - # vgs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Volume Groups information ---" - vgdisplay - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes ---" - lvs - # lvs -v - printf "\n----------------\n\n" - - echo "--- Show LVM Logical Volumes information ---" - lvdisplay - printf "\n----------------\n\n" - -fi - -} - - - - -############################################# -# MAIN -############################################# - -function main() { - - check_os_distribution - - # Bash Functions use logic of "If injected Terraform value is true (i.e. LVM is used for the mount point) then run Bash Function". - # Ensure Bash Function is called with quotes surrounding Bash Variable of list, otherwise will expand and override other Bash Function Arguments - - printf "\n----------------\n\n" - echo '--- Rescan SCSI bus for new SCSI/iSCSI devices ---' - /usr/bin/rescan-scsi-bus.sh - printf "\n----------------\n\n" - - #echo 'Install jq' - #if [ "$os_type" = "rhel" ] ; then yum --assumeyes --debuglevel=1 install jq ; elif [ "$os_type" = "sles" ] ; then zypper install --no-confirm jq ; fi - ##web_proxy_ip_port=$(echo ${var.module_var_web_proxy_url} | awk -F '^http[s]?://' '{print $2}') - ##if [ ! -f /usr/local/bin/jq ]; then curl -L --proxy $web_proxy_ip_port 'https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64' -o jq && chmod +x jq && mv jq /usr/local/bin; fi - - # Create the required directories - mkdir --parents /hana/{shared,data,log} --mode 755 - mkdir --parents /usr/sap --mode 755 - mkdir --parents /sapmnt --mode 755 - - - # If any mount point uses LVM. i.e. 
IF with OR operator - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] || [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] || [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_install - fi - - - if [[ ${var.module_var_disk_volume_count_hana_data} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_data}" == "true" ]] - then - lvm_filesystem_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_lvm_pv_data_alignment_hana_data}" "vg_hana_data" "${var.module_var_lvm_vg_data_alignment_hana_data}" "${var.module_var_lvm_vg_physical_extent_size_hana_data}" "${var.module_var_lvm_lv_stripe_size_hana_data}" "${var.module_var_filesystem_hana_data}" - - elif [[ "${var.module_var_lvm_enable_hana_data}" == "false" ]] - then - physical_volume_partition_runner "/hana/data" "${var.module_var_disk_volume_capacity_hana_data}" "${var.module_var_physical_partition_filesystem_block_size_hana_data}" "hana_data" "${var.module_var_filesystem_hana_data}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_log} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_log}" == "true" ]] - then - lvm_filesystem_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_lvm_pv_data_alignment_hana_log}" "vg_hana_log" "${var.module_var_lvm_vg_data_alignment_hana_log}" "${var.module_var_lvm_vg_physical_extent_size_hana_log}" "${var.module_var_lvm_lv_stripe_size_hana_log}" "${var.module_var_filesystem_hana_log}" - - elif [[ "${var.module_var_lvm_enable_hana_log}" == "false" ]] - then - physical_volume_partition_runner "/hana/log" "${var.module_var_disk_volume_capacity_hana_log}" "${var.module_var_physical_partition_filesystem_block_size_hana_log}" "hana_log" "${var.module_var_filesystem_hana_log}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_hana_shared} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_hana_shared}" == "true" ]] - then - lvm_filesystem_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_lvm_pv_data_alignment_hana_shared}" "vg_hana_shared" "${var.module_var_lvm_vg_data_alignment_hana_shared}" "${var.module_var_lvm_vg_physical_extent_size_hana_shared}" "${var.module_var_lvm_lv_stripe_size_hana_shared}" "${var.module_var_filesystem_hana_shared}" - - elif [[ "${var.module_var_lvm_enable_hana_shared}" == "false" ]] - then - physical_volume_partition_runner "/hana/shared" "${var.module_var_disk_volume_capacity_hana_shared}" "${var.module_var_physical_partition_filesystem_block_size_hana_shared}" "hana_shared" "${var.module_var_filesystem_hana_shared}" - fi - fi - - - if [[ ${var.module_var_disk_volume_count_anydb} -gt 0 ]] - then - if [[ "${var.module_var_lvm_enable_anydb}" == "true" ]] - then - lvm_filesystem_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_lvm_pv_data_alignment_anydb}" "vg_anydb" "${var.module_var_lvm_vg_data_alignment_anydb}" "${var.module_var_lvm_vg_physical_extent_size_anydb}" "${var.module_var_lvm_lv_stripe_size_anydb}" "${var.module_var_filesystem_anydb}" - - elif [[ "${var.module_var_lvm_enable_anydb}" == "false" ]] - then - physical_volume_partition_runner "${var.module_var_filesystem_mount_path_anydb}" "${var.module_var_disk_volume_capacity_anydb}" "${var.module_var_physical_partition_filesystem_block_size_anydb}" "anydb" "${var.module_var_filesystem_anydb}" - fi - fi - - - if [[ 
${var.module_var_disk_volume_count_usr_sap} -gt 0 ]] - then - physical_volume_partition_runner "/usr/sap" "${var.module_var_disk_volume_capacity_usr_sap}" "4k" "usr_sap" "${var.module_var_filesystem_usr_sap}" - fi - - - if [[ ${var.module_var_disk_volume_count_sapmnt} -gt 0 ]] - then - physical_volume_partition_runner "/sapmnt" "${var.module_var_disk_volume_capacity_sapmnt}" "4k" "sapmnt" "${var.module_var_filesystem_sapmnt}" - fi - - - if [[ ${var.module_var_disk_swapfile_size_gb} -gt 0 ]] - then - create_swap_file "${var.module_var_disk_swapfile_size_gb}" - else - create_swap_partition "${var.module_var_disk_volume_capacity_swap}" - fi - - - physical_volume_partition_runner "${var.module_var_sap_software_download_directory}" "${var.module_var_disk_volume_capacity_software}" "4k" "software" "xfs" - - - mount -a - -} - - -# Run script by calling 'main' Bash Function -main - - -EOF - } - -} diff --git a/vmware_vm/host_provision/host.tf b/vmware_vm/host_provision/host.tf index 621ac59..b370802 100644 --- a/vmware_vm/host_provision/host.tf +++ b/vmware_vm/host_provision/host.tf @@ -26,14 +26,7 @@ data "template_file" "cloud_init_user_data" { resource "vsphere_virtual_machine" "host_provision" { depends_on = [ - vsphere_virtual_disk.virtual_disk_hana_data, - vsphere_virtual_disk.virtual_disk_hana_log, - vsphere_virtual_disk.virtual_disk_hana_shared, - vsphere_virtual_disk.virtual_disk_anydb, - vsphere_virtual_disk.virtual_disk_usr_sap, - vsphere_virtual_disk.virtual_disk_sapmnt, - vsphere_virtual_disk.virtual_disk_swap, - vsphere_virtual_disk.virtual_disk_software + vsphere_virtual_disk.virtual_disk_provision ] name = var.module_var_vmware_vm_hostname @@ -89,7 +82,7 @@ resource "vsphere_virtual_machine" "host_provision" { dynamic "disk" { for_each = [ - for virtual_disks in concat(vsphere_virtual_disk.virtual_disk_hana_data,vsphere_virtual_disk.virtual_disk_hana_log,vsphere_virtual_disk.virtual_disk_hana_shared,vsphere_virtual_disk.virtual_disk_anydb,vsphere_virtual_disk.virtual_disk_usr_sap,vsphere_virtual_disk.virtual_disk_sapmnt,vsphere_virtual_disk.virtual_disk_swap,vsphere_virtual_disk.virtual_disk_software) : { + for virtual_disks in concat(vsphere_virtual_disk.virtual_disk_provision) : { path = virtual_disks.vmdk_path # size = virtual_disks.size } diff --git a/vmware_vm/host_provision/host_storage.tf b/vmware_vm/host_provision/host_storage.tf index 16d1e77..f39fda6 100644 --- a/vmware_vm/host_provision/host_storage.tf +++ b/vmware_vm/host_provision/host_storage.tf @@ -2,147 +2,25 @@ # To enable the dynamic block for disks attachment to the VMware Virtual Machine, must use count on each Virtual Disk # When using count = 1, the Virtual Disk is returned as a set. 
Without count it is returned as an object and will fail the for loop on the dynamic block -resource "vsphere_virtual_disk" "virtual_disk_hana_data" { - count = var.module_var_disk_volume_count_hana_data - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-hana-data${count.index}.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_hana_data - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_hana_log" { - count = var.module_var_disk_volume_count_hana_log - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-hana-log${count.index}.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_hana_log - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_hana_shared" { - count = var.module_var_disk_volume_count_hana_shared - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-hana-shared${count.index}.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_hana_shared - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_anydb" { - count = var.module_var_disk_volume_count_anydb - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-anydb${count.index}.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_anydb - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_usr_sap" { - count = var.module_var_disk_volume_count_usr_sap // Must be no more than 1 - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-usr-sap${count.index}.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_usr_sap - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) +resource "vsphere_virtual_disk" "virtual_disk_provision" { - lifecycle { - ignore_changes = [ - type - ] + for_each = { + for disk in flatten( + [ for storage_item in var.module_var_storage_definition: + [ for index, count in range(0,try(storage_item.disk_count,1)) : + tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) + ] + ] + ): + disk.name => disk } -} - - -resource "vsphere_virtual_disk" "virtual_disk_sapmnt" { - count = var.module_var_disk_volume_count_sapmnt // Must be no more than 1 - 
datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-sapmnt.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_sapmnt - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_swap" { - count = var.module_var_disk_volume_count_swap // Must be no more than 1 - datacenter = data.vsphere_datacenter.datacenter.name - datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-swap.vmdk" - create_directories = true - - size = var.module_var_disk_volume_capacity_swap - type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) - - lifecycle { - ignore_changes = [ - type - ] - } - -} - - -resource "vsphere_virtual_disk" "virtual_disk_software" { - count = 1 // Must be no more than 1 datacenter = data.vsphere_datacenter.datacenter.name datastore = data.vsphere_datastore.datastore.name - vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-software.vmdk" + vmdk_path = "/${var.module_var_vmware_vm_hostname}_data/${var.module_var_vmware_vm_hostname}-${each.value.name}.vmdk" create_directories = true - size = var.module_var_disk_volume_capacity_software + size = each.value.disk_size type = "lazy" # Thick Provision Lazy Zeroed (allocate then zero on first write) lifecycle { diff --git a/vmware_vm/host_provision/module_variables.tf b/vmware_vm/host_provision/module_variables.tf index d1319e6..94a8b3f 100644 --- a/vmware_vm/host_provision/module_variables.tf +++ b/vmware_vm/host_provision/module_variables.tf @@ -62,121 +62,4 @@ variable "module_var_os_systems_mgmt_host" { default = "" } - -variable "module_var_disk_volume_count_hana_data" {} -variable "module_var_disk_volume_capacity_hana_data" {} -variable "module_var_lvm_enable_hana_data" {} -variable "module_var_lvm_pv_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_data" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_data" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_data" { - default = "64K" -} -variable "module_var_filesystem_hana_data" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_data" {} - - -variable "module_var_disk_volume_count_hana_log" {} -variable "module_var_disk_volume_capacity_hana_log" {} -variable "module_var_lvm_enable_hana_log" {} -variable "module_var_lvm_pv_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_log" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_hana_log" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_log" { - default = "64K" -} -variable "module_var_filesystem_hana_log" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_log" {} - - -variable "module_var_disk_volume_count_hana_shared" {} -variable "module_var_disk_volume_capacity_hana_shared" {} -variable "module_var_lvm_enable_hana_shared" {} -variable "module_var_lvm_pv_data_alignment_hana_shared" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_hana_shared" { - default = "1M" -} -variable 
"module_var_lvm_vg_physical_extent_size_hana_shared" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_hana_shared" { - default = "64K" -} -variable "module_var_filesystem_hana_shared" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_hana_shared" {} - - -variable "module_var_disk_volume_count_anydb" {} -variable "module_var_disk_volume_capacity_anydb" {} -variable "module_var_disk_volume_iops_anydb" { - default = null -} -variable "module_var_lvm_enable_anydb" { - default = false -} -variable "module_var_lvm_pv_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_data_alignment_anydb" { - default = "1M" -} -variable "module_var_lvm_vg_physical_extent_size_anydb" { - default = "4M" -} -variable "module_var_lvm_lv_stripe_size_anydb" { - default = "64K" -} -variable "module_var_filesystem_mount_path_anydb" { -} -variable "module_var_filesystem_anydb" { - default = "xfs" -} -variable "module_var_physical_partition_filesystem_block_size_anydb" { - default = "4k" -} - - -variable "module_var_disk_volume_count_usr_sap" {} -variable "module_var_disk_volume_capacity_usr_sap" {} -variable "module_var_filesystem_usr_sap" { - default = "xfs" -} - -variable "module_var_disk_volume_count_sapmnt" {} -variable "module_var_disk_volume_capacity_sapmnt" {} -variable "module_var_filesystem_sapmnt" { - default = "xfs" -} - -variable "module_var_disk_swapfile_size_gb" {} -variable "module_var_disk_volume_count_swap" {} -variable "module_var_disk_volume_capacity_swap" {} -variable "module_var_filesystem_swap" { - default = "xfs" -} - -variable "module_var_sap_software_download_directory" { - default = "/software" -} -variable "module_var_disk_volume_capacity_software" { -} +variable "module_var_storage_definition" {} From 2b88323e98df28c385e203f3c91d0ed1362b4139 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Fri, 8 Dec 2023 18:27:46 +0000 Subject: [PATCH 13/25] fix: deprecated resource name --- ibmcloud_vs/host_nfs/host_file_storage.tf | 2 +- ibmcloud_vs/host_nfs/module_outputs.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ibmcloud_vs/host_nfs/host_file_storage.tf b/ibmcloud_vs/host_nfs/host_file_storage.tf index 4a4ee22..5ebf0e3 100644 --- a/ibmcloud_vs/host_nfs/host_file_storage.tf +++ b/ibmcloud_vs/host_nfs/host_file_storage.tf @@ -10,7 +10,7 @@ resource "ibm_is_share" "file_storage_sapmnt" { } -resource "ibm_is_share_target" "file_storage_attach_sapmnt" { +resource "ibm_is_share_mount_target" "file_storage_attach_sapmnt" { count = var.module_var_nfs_boolean_sapmnt ? 
1 : 0 name = "${var.module_var_resource_prefix}-nfs-sapmnt-attach" diff --git a/ibmcloud_vs/host_nfs/module_outputs.tf b/ibmcloud_vs/host_nfs/module_outputs.tf index ce84f4f..adce680 100644 --- a/ibmcloud_vs/host_nfs/module_outputs.tf +++ b/ibmcloud_vs/host_nfs/module_outputs.tf @@ -1,5 +1,5 @@ # Mount path output "output_nfs_fqdn" { - value = ibm_is_share_target.file_storage_attach_sapmnt[0].mount_path + value = ibm_is_share_mount_target.file_storage_attach_sapmnt[0].mount_path } From 15c94c3742f0fbaeaab8c9d3f8c4344be063df08 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Fri, 8 Dec 2023 23:12:19 +0000 Subject: [PATCH 14/25] fix: profile change --- ibmcloud_vs/host_nfs/host_file_storage.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ibmcloud_vs/host_nfs/host_file_storage.tf b/ibmcloud_vs/host_nfs/host_file_storage.tf index 5ebf0e3..e58dc16 100644 --- a/ibmcloud_vs/host_nfs/host_file_storage.tf +++ b/ibmcloud_vs/host_nfs/host_file_storage.tf @@ -6,7 +6,7 @@ resource "ibm_is_share" "file_storage_sapmnt" { zone = local.target_vpc_availability_zone size = 2048 - profile = "tier-3iops" + profile = "dp2" } From 17f7f22efce67f1305e90354c13ae8752c88a458 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Sat, 9 Dec 2023 01:27:22 +0000 Subject: [PATCH 15/25] fix: bad ref --- ibmcloud_vs/host_nfs/module_outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ibmcloud_vs/host_nfs/module_outputs.tf b/ibmcloud_vs/host_nfs/module_outputs.tf index adce680..e3e6c8d 100644 --- a/ibmcloud_vs/host_nfs/module_outputs.tf +++ b/ibmcloud_vs/host_nfs/module_outputs.tf @@ -1,5 +1,5 @@ # Mount path output "output_nfs_fqdn" { - value = ibm_is_share_mount_target.file_storage_attach_sapmnt[0].mount_path + value = try(ibm_is_share_mount_target.file_storage_attach_sapmnt[0].mount_path,"") } From 7b215d43da60e78e88446d92d26f628598f5be0e Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 14:44:46 +0000 Subject: [PATCH 16/25] fix: swap path --- aws_ec2_instance/host_provision/host_block_storage.tf | 2 +- gcp_ce_vm/host_provision/host_block_storage.tf | 2 +- ibmcloud_powervs/host_provision/host_block_storage.tf | 2 +- ibmcloud_vs/host_provision/host_block_storage.tf | 2 +- msazure_vm/host_provision/host_block_storage.tf | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/aws_ec2_instance/host_provision/host_block_storage.tf b/aws_ec2_instance/host_provision/host_block_storage.tf index 3518c08..90c12b9 100644 --- a/aws_ec2_instance/host_provision/host_block_storage.tf +++ b/aws_ec2_instance/host_provision/host_block_storage.tf @@ -9,7 +9,7 @@ resource "aws_ebs_volume" "block_volume_provision" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk diff --git a/gcp_ce_vm/host_provision/host_block_storage.tf b/gcp_ce_vm/host_provision/host_block_storage.tf index 2858b8a..5eb7b2e 100644 --- a/gcp_ce_vm/host_provision/host_block_storage.tf +++ b/gcp_ce_vm/host_provision/host_block_storage.tf @@ -14,7 +14,7 @@ resource "google_compute_disk" "block_volume" { [ for storage_item in 
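
The try() wrapper matters here because the mount target is created with count = 0 when the NFS share is not requested; indexing the empty resource list would otherwise fail the plan outright. The behaviour in isolation:

  locals {
    targets    = []                         # as if count = 0 left no mount target
    mount_path = try(local.targets[0], "")  # yields "" instead of an index error
  }
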
var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk diff --git a/ibmcloud_powervs/host_provision/host_block_storage.tf b/ibmcloud_powervs/host_provision/host_block_storage.tf index 83421fe..41f1109 100644 --- a/ibmcloud_powervs/host_provision/host_block_storage.tf +++ b/ibmcloud_powervs/host_provision/host_block_storage.tf @@ -11,7 +11,7 @@ resource "ibm_pi_volume" "block_volume_provision" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk diff --git a/ibmcloud_vs/host_provision/host_block_storage.tf b/ibmcloud_vs/host_provision/host_block_storage.tf index aeee573..ad81cfd 100644 --- a/ibmcloud_vs/host_provision/host_block_storage.tf +++ b/ibmcloud_vs/host_provision/host_block_storage.tf @@ -11,7 +11,7 @@ resource "ibm_is_volume" "block_volume_provision" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk diff --git a/msazure_vm/host_provision/host_block_storage.tf b/msazure_vm/host_provision/host_block_storage.tf index b6e4a83..24d05c5 100644 --- a/msazure_vm/host_provision/host_block_storage.tf +++ b/msazure_vm/host_provision/host_block_storage.tf @@ -9,7 +9,7 @@ resource "azurerm_managed_disk" "block_volume" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk From b3a4f82d665c0630bc228ca9a87bc947ac72834d Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 14:45:35 +0000 Subject: [PATCH 17/25] fix: swap path hyp --- ibmpowervc/host_provision/host_storage.tf | 2 +- vmware_vm/host_provision/host_storage.tf | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ibmpowervc/host_provision/host_storage.tf b/ibmpowervc/host_provision/host_storage.tf index 4ebdff7..0067ba4 100644 --- a/ibmpowervc/host_provision/host_storage.tf +++ b/ibmpowervc/host_provision/host_storage.tf @@ -9,7 +9,7 @@ resource "openstack_blockstorage_volume_v2" "block_volume_provision" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if 
try(storage_item.swap_path,"") == "" ] ): disk.name => disk diff --git a/vmware_vm/host_provision/host_storage.tf b/vmware_vm/host_provision/host_storage.tf index f39fda6..fa24a1b 100644 --- a/vmware_vm/host_provision/host_storage.tf +++ b/vmware_vm/host_provision/host_storage.tf @@ -9,7 +9,7 @@ resource "vsphere_virtual_disk" "virtual_disk_provision" { [ for storage_item in var.module_var_storage_definition: [ for index, count in range(0,try(storage_item.disk_count,1)) : tomap({"name" = replace("${storage_item.name}-${index}","_","-"), "disk_type" = try(storage_item.disk_type, null), "disk_size" = storage_item.disk_size, "disk_iops" = try(storage_item.disk_iops,null)}) - ] + ] if try(storage_item.swap_path,"") == "" ] ): disk.name => disk From 32d8ae61f11589ad75f54673b1d9e81b5cc29c24 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 14:46:10 +0000 Subject: [PATCH 18/25] fix: playbook notes --- .../ansible_playbook.yml | 59 +------------------ 1 file changed, 1 insertion(+), 58 deletions(-) diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml index aa14439..d0612b4 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml @@ -1,66 +1,9 @@ --- -# Ansible Playbook for SAP S/4HANA distributed installation using SAP Maintenance Planner +# Ansible Playbook for SAP S/4HANA distributed installation using SAP Maintenance Planner # Use include_role inside Task block, instead of using roles declaration or Task block with import_roles. # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation - -#### Preflight checks #### - -#- name: Ansible Play for verifying infrastructure for distributed installation -# hosts: nwas_ascs, nwas_pas, nwas_aas -# become: true -# any_errors_fatal: true # https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#aborting-a-play-on-all-hosts -# tasks: - -# - name: Check SAP Mount Directory (/sapmnt) is NFS on each SAP NetWeaver host - -# - name: Ensure SAP Common Transport Directory symlink (/usr/sap/trans > /sapmnt/trans) exists - -# - name: Check SAP Common Transport Directory is NFS on each SAP NetWeaver host - -# - name: Check interconnectivity between hosts - SAP NetWeaver ASCS -#32{{sap_swpm_ascs_instance_nr}} -#36{{sap_swpm_ascs_instance_nr}} -#39{{sap_swpm_ascs_instance_nr}} -#5{{sap_swpm_ascs_instance_nr}}13 -#5{{sap_swpm_ascs_instance_nr}}14 - -# - name: Check interconnectivity between hosts - SAP NetWeaver PAS -#32{{sap_swpm_pas_instance_nr}} -#33{{sap_swpm_pas_instance_nr}} -#48{{sap_swpm_pas_instance_nr}} -#5{{sap_swpm_pas_instance_nr}}13 -#5{{sap_swpm_pas_instance_nr}}14 -#80{{sap_swpm_pas_instance_nr}} -#443{{sap_swpm_pas_instance_nr}} - -# - name: Check interconnectivity between hosts - SAP HANA -#5{{sap_swpm_db_instance_nr}}13 -#5{{sap_swpm_db_instance_nr}}14 -#3{{sap_swpm_db_instance_nr}}06 -#3{{sap_swpm_db_instance_nr}}13 -#3{{sap_swpm_db_instance_nr}}15 -#80{{sap_swpm_db_instance_nr}} -#43{{sap_swpm_db_instance_nr}} - -# - name: Check interconnectivity between hosts - SAP Host Agent -#1128 -#1129 - -# - name: Check interconnectivity between hosts - SAP HANA System Replication -#4{{sap_swpm_db_instance_nr}}01 -#4{{sap_swpm_db_instance_nr}}02 -#4{{sap_swpm_db_instance_nr}}06 
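
The added if clause filters the storage definition before the per-disk fan-out, so any entry that declares a swap_path is never provisioned as a block volume (the assumption being that such entries are realised as OS-level swap files instead). Illustrated with hypothetical values:

  locals {
    storage_definition = [                    # hypothetical example input
      { name = "hana_data", disk_size = 384 },
      { name = "swap", disk_size = 96, swap_path = "/swapfile" }
    ]
    provisioned = [
      for item in local.storage_definition : item.name
      if try(item.swap_path, "") == ""
    ]
    # provisioned == ["hana_data"]; the swap entry is skipped
  }
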
-#4{{sap_swpm_db_instance_nr}}03 -#4{{sap_swpm_db_instance_nr}}07 -#4{{sap_swpm_db_instance_nr}}40 - 4{{sap_swpm_db_instance_nr}}97 -#2224 -#3121 -#5404-5412 - - - - name: Ansible Play for ensuring rsync on all hosts hosts: hana_primary, nwas_ascs, nwas_pas, nwas_aas become: true From 9b8b756ad3bd90b10640eff6aff802b15f1337f8 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 14:49:27 +0000 Subject: [PATCH 19/25] fix: append storage def var --- all/ansible_sap_bw4hana_install/module_variables.tf | 4 ++++ all/ansible_sap_ecc_hana_install/module_variables.tf | 4 ++++ all/ansible_sap_ecc_hana_system_copy_hdb/module_variables.tf | 4 ++++ all/ansible_sap_ecc_ibmdb2_install/module_variables.tf | 4 ++++ all/ansible_sap_ecc_oracledb_install/module_variables.tf | 4 ++++ all/ansible_sap_ecc_sapase_install/module_variables.tf | 4 ++++ all/ansible_sap_ecc_sapmaxdb_install/module_variables.tf | 4 ++++ all/ansible_sap_hana_install/module_variables.tf | 4 ++++ all/ansible_sap_nwas_abap_hana_install/module_variables.tf | 4 ++++ all/ansible_sap_nwas_abap_ibmdb2_install/module_variables.tf | 4 ++++ .../module_variables.tf | 4 ++++ all/ansible_sap_nwas_abap_sapase_install/module_variables.tf | 4 ++++ .../module_variables.tf | 4 ++++ all/ansible_sap_nwas_java_ibmdb2_install/module_variables.tf | 4 ++++ all/ansible_sap_nwas_java_sapase_install/module_variables.tf | 4 ++++ .../module_variables.tf | 4 ++++ all/ansible_sap_s4hana_install/module_variables.tf | 4 ++++ all/ansible_sap_s4hana_install_maintplan/module_variables.tf | 4 ++++ all/ansible_sap_s4hana_system_copy_hdb/module_variables.tf | 4 ++++ all/ansible_sap_solman_sapase_install/module_variables.tf | 4 ++++ all/ansible_sap_solman_saphana_install/module_variables.tf | 4 ++++ 21 files changed, 84 insertions(+) diff --git a/all/ansible_sap_bw4hana_install/module_variables.tf b/all/ansible_sap_bw4hana_install/module_variables.tf index 32b21d2..4d17f03 100644 --- a/all/ansible_sap_bw4hana_install/module_variables.tf +++ b/all/ansible_sap_bw4hana_install/module_variables.tf @@ -81,3 +81,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_hana_install/module_variables.tf b/all/ansible_sap_ecc_hana_install/module_variables.tf index f5ec47f..3956472 100644 --- a/all/ansible_sap_ecc_hana_install/module_variables.tf +++ b/all/ansible_sap_ecc_hana_install/module_variables.tf @@ -77,3 +77,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_hana_system_copy_hdb/module_variables.tf b/all/ansible_sap_ecc_hana_system_copy_hdb/module_variables.tf index 353b5bc..6238614 100644 --- a/all/ansible_sap_ecc_hana_system_copy_hdb/module_variables.tf +++ b/all/ansible_sap_ecc_hana_system_copy_hdb/module_variables.tf @@ -80,3 +80,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_ibmdb2_install/module_variables.tf b/all/ansible_sap_ecc_ibmdb2_install/module_variables.tf index f38b388..c082aed 100644 --- a/all/ansible_sap_ecc_ibmdb2_install/module_variables.tf +++ 
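
The commented-out preflight ideas removed above amount to port reachability tests between the SAP hosts; expressed as an Ansible task, one such check could look roughly like this (variable names assumed from the surrounding playbooks):

  - name: Check SAP HANA SQL port is reachable
    ansible.builtin.wait_for:
      host: "{{ sap_swpm_db_host }}"
      port: "3{{ sap_swpm_db_instance_nr }}15"
      timeout: 10
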
b/all/ansible_sap_ecc_ibmdb2_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/db2" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_oracledb_install/module_variables.tf b/all/ansible_sap_ecc_oracledb_install/module_variables.tf index d6d317c..0aff808 100644 --- a/all/ansible_sap_ecc_oracledb_install/module_variables.tf +++ b/all/ansible_sap_ecc_oracledb_install/module_variables.tf @@ -76,3 +76,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/oracle" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_sapase_install/module_variables.tf b/all/ansible_sap_ecc_sapase_install/module_variables.tf index 3f2a4a7..3a8526b 100644 --- a/all/ansible_sap_ecc_sapase_install/module_variables.tf +++ b/all/ansible_sap_ecc_sapase_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sybase" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_ecc_sapmaxdb_install/module_variables.tf b/all/ansible_sap_ecc_sapmaxdb_install/module_variables.tf index f417e9a..c52da5d 100644 --- a/all/ansible_sap_ecc_sapmaxdb_install/module_variables.tf +++ b/all/ansible_sap_ecc_sapmaxdb_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sapdb" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_hana_install/module_variables.tf b/all/ansible_sap_hana_install/module_variables.tf index fd25a71..5222fbc 100644 --- a/all/ansible_sap_hana_install/module_variables.tf +++ b/all/ansible_sap_hana_install/module_variables.tf @@ -49,3 +49,7 @@ variable "module_var_sap_hana_install_instance_number" { variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_abap_hana_install/module_variables.tf b/all/ansible_sap_nwas_abap_hana_install/module_variables.tf index d0ba09a..768a125 100644 --- a/all/ansible_sap_nwas_abap_hana_install/module_variables.tf +++ b/all/ansible_sap_nwas_abap_hana_install/module_variables.tf @@ -78,3 +78,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_abap_ibmdb2_install/module_variables.tf b/all/ansible_sap_nwas_abap_ibmdb2_install/module_variables.tf index f38b388..c082aed 100644 --- a/all/ansible_sap_nwas_abap_ibmdb2_install/module_variables.tf +++ b/all/ansible_sap_nwas_abap_ibmdb2_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/db2" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_abap_oracledb_install/module_variables.tf b/all/ansible_sap_nwas_abap_oracledb_install/module_variables.tf index d6d317c..0aff808 100644 --- a/all/ansible_sap_nwas_abap_oracledb_install/module_variables.tf +++ 
b/all/ansible_sap_nwas_abap_oracledb_install/module_variables.tf @@ -76,3 +76,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/oracle" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_abap_sapase_install/module_variables.tf b/all/ansible_sap_nwas_abap_sapase_install/module_variables.tf index 3f2a4a7..3a8526b 100644 --- a/all/ansible_sap_nwas_abap_sapase_install/module_variables.tf +++ b/all/ansible_sap_nwas_abap_sapase_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sybase" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_abap_sapmaxdb_install/module_variables.tf b/all/ansible_sap_nwas_abap_sapmaxdb_install/module_variables.tf index f417e9a..c52da5d 100644 --- a/all/ansible_sap_nwas_abap_sapmaxdb_install/module_variables.tf +++ b/all/ansible_sap_nwas_abap_sapmaxdb_install/module_variables.tf @@ -74,3 +74,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sapdb" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_java_ibmdb2_install/module_variables.tf b/all/ansible_sap_nwas_java_ibmdb2_install/module_variables.tf index 57bf449..5fefc52 100644 --- a/all/ansible_sap_nwas_java_ibmdb2_install/module_variables.tf +++ b/all/ansible_sap_nwas_java_ibmdb2_install/module_variables.tf @@ -71,3 +71,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/db2" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_nwas_java_sapase_install/module_variables.tf b/all/ansible_sap_nwas_java_sapase_install/module_variables.tf index 5e4138a..15576fc 100644 --- a/all/ansible_sap_nwas_java_sapase_install/module_variables.tf +++ b/all/ansible_sap_nwas_java_sapase_install/module_variables.tf @@ -71,3 +71,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sybase" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf b/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf index 9c72462..b2739a6 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf @@ -87,3 +87,7 @@ variable "module_var_inventory_nwas_ascs" {} variable "module_var_inventory_nwas_pas" {} variable "module_var_inventory_nwas_aas" {} + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_s4hana_install/module_variables.tf b/all/ansible_sap_s4hana_install/module_variables.tf index 32b21d2..4d17f03 100644 --- a/all/ansible_sap_s4hana_install/module_variables.tf +++ b/all/ansible_sap_s4hana_install/module_variables.tf @@ -81,3 +81,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_s4hana_install_maintplan/module_variables.tf 
b/all/ansible_sap_s4hana_install_maintplan/module_variables.tf index 5629b56..98dcde5 100644 --- a/all/ansible_sap_s4hana_install_maintplan/module_variables.tf +++ b/all/ansible_sap_s4hana_install_maintplan/module_variables.tf @@ -83,3 +83,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_s4hana_system_copy_hdb/module_variables.tf b/all/ansible_sap_s4hana_system_copy_hdb/module_variables.tf index 353b5bc..6238614 100644 --- a/all/ansible_sap_s4hana_system_copy_hdb/module_variables.tf +++ b/all/ansible_sap_s4hana_system_copy_hdb/module_variables.tf @@ -80,3 +80,7 @@ variable "module_var_sap_swpm_template_selected" {} variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_solman_sapase_install/module_variables.tf b/all/ansible_sap_solman_sapase_install/module_variables.tf index ad0da2e..7973dbb 100644 --- a/all/ansible_sap_solman_sapase_install/module_variables.tf +++ b/all/ansible_sap_solman_sapase_install/module_variables.tf @@ -78,3 +78,7 @@ variable "module_var_dry_run_test" { variable "module_var_filesystem_mount_path_anydb" { default = "/sybase" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} diff --git a/all/ansible_sap_solman_saphana_install/module_variables.tf b/all/ansible_sap_solman_saphana_install/module_variables.tf index b98ae2d..2559d39 100644 --- a/all/ansible_sap_solman_saphana_install/module_variables.tf +++ b/all/ansible_sap_solman_saphana_install/module_variables.tf @@ -84,3 +84,7 @@ variable "module_var_sap_swpm_template_selected" { variable "module_var_dry_run_test" { default = "" } + +variable "module_var_terraform_host_specification_storage_definition" { + default = {} +} From 73fc939a603631f53274e1cfc203a475fbf18b09 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 17:16:18 +0000 Subject: [PATCH 20/25] fix: append storage extravars --- .../create_ansible_extravars.tf | 12 ++++++++++++ .../create_ansible_extravars.tf | 13 +++++++++++++ .../create_ansible_extravars.tf | 13 +++++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 9 +++++++++ .../create_ansible_extravars.tf | 13 +++++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 10 ++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 4 ++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 11 +++++++++++ .../create_ansible_extravars.tf | 12 ++++++++++++ 21 files changed, 229 insertions(+) diff --git a/all/ansible_sap_bw4hana_install/create_ansible_extravars.tf b/all/ansible_sap_bw4hana_install/create_ansible_extravars.tf index 1f80755..2dee36d 100644 --- a/all/ansible_sap_bw4hana_install/create_ansible_extravars.tf +++ 
b/all/ansible_sap_bw4hana_install/create_ansible_extravars.tf @@ -202,5 +202,17 @@ sap_swpm_templates_install_dictionary: # For dual host installation, change the sap_swpm_db_host to appropriate value +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + + EOF } diff --git a/all/ansible_sap_ecc_hana_install/create_ansible_extravars.tf b/all/ansible_sap_ecc_hana_install/create_ansible_extravars.tf index d18e52d..568db3c 100644 --- a/all/ansible_sap_ecc_hana_install/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_hana_install/create_ansible_extravars.tf @@ -427,5 +427,18 @@ sap_swpm_templates_install_dictionary: - '51050036_17' - '51050036_18' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + + EOF } diff --git a/all/ansible_sap_ecc_hana_system_copy_hdb/create_ansible_extravars.tf b/all/ansible_sap_ecc_hana_system_copy_hdb/create_ansible_extravars.tf index 116f508..a289b5c 100644 --- a/all/ansible_sap_ecc_hana_system_copy_hdb/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_hana_system_copy_hdb/create_ansible_extravars.tf @@ -259,5 +259,18 @@ sap_swpm_templates_install_dictionary: - 'IMDB_CLIENT20_007_26-80002095.SAR' # SAP HANA Client - 'SAPHOSTAGENT51_51-70002261.SAR' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + + EOF } diff --git a/all/ansible_sap_ecc_ibmdb2_install/create_ansible_extravars.tf b/all/ansible_sap_ecc_ibmdb2_install/create_ansible_extravars.tf index 9a8ccdb..df3b572 100644 --- a/all/ansible_sap_ecc_ibmdb2_install/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_ibmdb2_install/create_ansible_extravars.tf @@ -373,5 +373,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ 
'${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_ecc_oracledb_install/create_ansible_extravars.tf b/all/ansible_sap_ecc_oracledb_install/create_ansible_extravars.tf index 34aa443..c6c1c87 100644 --- a/all/ansible_sap_ecc_oracledb_install/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_oracledb_install/create_ansible_extravars.tf @@ -269,5 +269,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_ecc_sapase_install/create_ansible_extravars.tf b/all/ansible_sap_ecc_sapase_install/create_ansible_extravars.tf index 73b9894..ca44042 100644 --- a/all/ansible_sap_ecc_sapase_install/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_sapase_install/create_ansible_extravars.tf @@ -232,5 +232,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_ecc_sapmaxdb_install/create_ansible_extravars.tf b/all/ansible_sap_ecc_sapmaxdb_install/create_ansible_extravars.tf index d46fdfb..edba615 100644 --- a/all/ansible_sap_ecc_sapmaxdb_install/create_ansible_extravars.tf +++ b/all/ansible_sap_ecc_sapmaxdb_install/create_ansible_extravars.tf @@ -231,5 +231,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_hana_install/create_ansible_extravars.tf b/all/ansible_sap_hana_install/create_ansible_extravars.tf index 875541b..7b33138 100644 --- a/all/ansible_sap_hana_install/create_ansible_extravars.tf +++ b/all/ansible_sap_hana_install/create_ansible_extravars.tf @@ -97,5 +97,14 @@ sap_hana_install_update_etchosts: 'false' # Check the file /defaults/main.yml +sap_storage_setup_sid: "${var.module_var_sap_hana_install_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, 
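
The replace(jsonencode(...)) construction does double duty: jsonencode() serialises the Terraform value to a JSON string, and replace() escapes its double quotes so the string can be embedded inside the double-quoted YAML scalar; Ansible's from_json filter then rebuilds the dictionary at runtime. For a hypothetical one-disk definition, the rendered extravars line would look roughly like:

  terraform_host_specification_storage_definition: "{{ '[{\"disk_size\":384,\"name\":\"hana_data\"}]' | from_json }}"
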
nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_abap_hana_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_abap_hana_install/create_ansible_extravars.tf index fbef576..bd75675 100644 --- a/all/ansible_sap_nwas_abap_hana_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_abap_hana_install/create_ansible_extravars.tf @@ -263,5 +263,18 @@ sap_swpm_templates_install_dictionary: # - '51050829_8' # NW 7.5 Upgrade Export Part II 1/2 # - '51050829_9' # NW 7.5 Upgrade Export Part II 2/2 + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + + EOF } diff --git a/all/ansible_sap_nwas_abap_ibmdb2_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_abap_ibmdb2_install/create_ansible_extravars.tf index 1ff2173..710960e 100644 --- a/all/ansible_sap_nwas_abap_ibmdb2_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_abap_ibmdb2_install/create_ansible_extravars.tf @@ -199,5 +199,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_abap_oracledb_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_abap_oracledb_install/create_ansible_extravars.tf index e0f399a..579e5af 100644 --- a/all/ansible_sap_nwas_abap_oracledb_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_abap_oracledb_install/create_ansible_extravars.tf @@ -237,5 +237,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_abap_sapase_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_abap_sapase_install/create_ansible_extravars.tf index f9ccaff..e572245 100644 --- 
a/all/ansible_sap_nwas_abap_sapase_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_abap_sapase_install/create_ansible_extravars.tf @@ -197,5 +197,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_abap_sapmaxdb_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_abap_sapmaxdb_install/create_ansible_extravars.tf index 7b39da5..3c774af 100644 --- a/all/ansible_sap_nwas_abap_sapmaxdb_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_abap_sapmaxdb_install/create_ansible_extravars.tf @@ -196,5 +196,15 @@ sap_swpm_templates_install_dictionary: - 'SAPCAR_1115-70006238.EXE' +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_java_ibmdb2_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_java_ibmdb2_install/create_ansible_extravars.tf index c9daed1..6d9f218 100644 --- a/all/ansible_sap_nwas_java_ibmdb2_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_java_ibmdb2_install/create_ansible_extravars.tf @@ -141,5 +141,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_nwas_java_sapase_install/create_ansible_extravars.tf b/all/ansible_sap_nwas_java_sapase_install/create_ansible_extravars.tf index 4f477cc..89e9238 100644 --- a/all/ansible_sap_nwas_java_sapase_install/create_ansible_extravars.tf +++ b/all/ansible_sap_nwas_java_sapase_install/create_ansible_extravars.tf @@ -140,5 +140,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary 
+terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf b/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf index 1ecce7d..db7493b 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf @@ -654,5 +654,9 @@ sap_update_profile_default_profile_params: #sap_update_profile_instance_profile_params: # - icm/server_port_1 = PROT=HTTPS,PORT=443$$,PROCTIMEOUT=600,TIMEOUT=3600 + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_s4hana_install/create_ansible_extravars.tf b/all/ansible_sap_s4hana_install/create_ansible_extravars.tf index d86ae87..7f98ca9 100644 --- a/all/ansible_sap_s4hana_install/create_ansible_extravars.tf +++ b/all/ansible_sap_s4hana_install/create_ansible_extravars.tf @@ -383,5 +383,16 @@ sap_update_profile_default_profile_params: # - icm/server_port_1 = PROT=HTTPS,PORT=443$$,PROCTIMEOUT=600,TIMEOUT=3600 +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_s4hana_install_maintplan/create_ansible_extravars.tf b/all/ansible_sap_s4hana_install_maintplan/create_ansible_extravars.tf index 66fce98..ccc8b9e 100644 --- a/all/ansible_sap_s4hana_install_maintplan/create_ansible_extravars.tf +++ b/all/ansible_sap_s4hana_install_maintplan/create_ansible_extravars.tf @@ -254,5 +254,16 @@ sap_update_profile_default_profile_params: # - icm/server_port_1 = PROT=HTTPS,PORT=443$$,PROCTIMEOUT=600,TIMEOUT=3600 +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_s4hana_system_copy_hdb/create_ansible_extravars.tf b/all/ansible_sap_s4hana_system_copy_hdb/create_ansible_extravars.tf index ab89919..15eeec8 100644 --- a/all/ansible_sap_s4hana_system_copy_hdb/create_ansible_extravars.tf +++ b/all/ansible_sap_s4hana_system_copy_hdb/create_ansible_extravars.tf @@ -374,5 +374,16 @@ sap_swpm_templates_install_dictionary: # For dual host installation, change the sap_swpm_db_host to appropriate value +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, 
nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_solman_sapase_install/create_ansible_extravars.tf b/all/ansible_sap_solman_sapase_install/create_ansible_extravars.tf index 8e034a3..0b6047c 100644 --- a/all/ansible_sap_solman_sapase_install/create_ansible_extravars.tf +++ b/all/ansible_sap_solman_sapase_install/create_ansible_extravars.tf @@ -203,5 +203,16 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } diff --git a/all/ansible_sap_solman_saphana_install/create_ansible_extravars.tf b/all/ansible_sap_solman_saphana_install/create_ansible_extravars.tf index 55f4020..693a91b 100644 --- a/all/ansible_sap_solman_saphana_install/create_ansible_extravars.tf +++ b/all/ansible_sap_solman_saphana_install/create_ansible_extravars.tf @@ -250,5 +250,17 @@ sap_swpm_templates_install_dictionary: softwarecenter_search_list_ppc64le: - 'SAPCAR_1115-70006238.EXE' + +sap_storage_setup_sid: "${var.module_var_sap_swpm_sid}" + +# hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers +sap_storage_setup_host_type: + - hana_primary + - nwas_abap_ascs + - nwas_abap_pas + +# Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" + EOF } From ec1521fce6d0ebd9d648ca25af44fc0dddda2180 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Tue, 30 Jan 2024 17:28:18 +0000 Subject: [PATCH 21/25] fix: append storage logic --- .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ 
.../ansible_playbook.yml | 36 +++++++++++++++++++ .../ansible_playbook.yml | 36 +++++++++++++++++++ 20 files changed, 720 insertions(+) diff --git a/all/ansible_sap_bw4hana_install/ansible_playbook.yml b/all/ansible_sap_bw4hana_install/ansible_playbook.yml index dfd6f45..4f3a99b 100644 --- a/all/ansible_sap_bw4hana_install/ansible_playbook.yml +++ b/all/ansible_sap_bw4hana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_bw4hana_install set_fact: diff --git a/all/ansible_sap_ecc_hana_install/ansible_playbook.yml b/all/ansible_sap_ecc_hana_install/ansible_playbook.yml index 1b1e62c..9f640f0 100644 --- a/all/ansible_sap_ecc_hana_install/ansible_playbook.yml +++ b/all/ansible_sap_ecc_hana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + 
loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_hana_install set_fact: diff --git a/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_playbook.yml b/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_playbook.yml index 4ab736e..c258250 100644 --- a/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_playbook.yml +++ b/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_syscopy set_fact: diff --git a/all/ansible_sap_ecc_ibmdb2_install/ansible_playbook.yml b/all/ansible_sap_ecc_ibmdb2_install/ansible_playbook.yml index 584055c..fb7f32e 100644 --- a/all/ansible_sap_ecc_ibmdb2_install/ansible_playbook.yml +++ b/all/ansible_sap_ecc_ibmdb2_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set 
add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_ibmdb2_install set_fact: diff --git a/all/ansible_sap_ecc_oracledb_install/ansible_playbook.yml b/all/ansible_sap_ecc_oracledb_install/ansible_playbook.yml index dd69ddd..f9be764 100644 --- a/all/ansible_sap_ecc_oracledb_install/ansible_playbook.yml +++ b/all/ansible_sap_ecc_oracledb_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_oracledb_install set_fact: diff --git a/all/ansible_sap_ecc_sapase_install/ansible_playbook.yml b/all/ansible_sap_ecc_sapase_install/ansible_playbook.yml index ae1255b..b0b8634 100644 --- a/all/ansible_sap_ecc_sapase_install/ansible_playbook.yml +++ b/all/ansible_sap_ecc_sapase_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = 
[] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_sapase_install set_fact: diff --git a/all/ansible_sap_ecc_sapmaxdb_install/ansible_playbook.yml b/all/ansible_sap_ecc_sapmaxdb_install/ansible_playbook.yml index 4ca0698..532ab51 100644 --- a/all/ansible_sap_ecc_sapmaxdb_install/ansible_playbook.yml +++ b/all/ansible_sap_ecc_sapmaxdb_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_ecc_sapmaxdb_install set_fact: diff --git a/all/ansible_sap_hana_install/ansible_playbook.yml b/all/ansible_sap_hana_install/ansible_playbook.yml index 33d4afe..e27965e 100644 --- a/all/ansible_sap_hana_install/ansible_playbook.yml +++ b/all/ansible_sap_hana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + 
sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + - name: Set fact x86_64 softwarecenter_search_list_saphana set_fact: softwarecenter_search_list_saphana: "{{ softwarecenter_search_list_saphana_x86_64 }}" diff --git a/all/ansible_sap_nwas_abap_hana_install/ansible_playbook.yml b/all/ansible_sap_nwas_abap_hana_install/ansible_playbook.yml index daeaba2..bdda62a 100644 --- a/all/ansible_sap_nwas_abap_hana_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_abap_hana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_hana_install set_fact: diff --git a/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_playbook.yml b/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_playbook.yml index 24fb4c9..7d67c01 100644 --- a/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the 
sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_ibmdb2_install set_fact: diff --git a/all/ansible_sap_nwas_abap_oracledb_install/ansible_playbook.yml b/all/ansible_sap_nwas_abap_oracledb_install/ansible_playbook.yml index 3cf6b89..e0a038a 100644 --- a/all/ansible_sap_nwas_abap_oracledb_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_abap_oracledb_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_oracledb_install set_fact: diff --git a/all/ansible_sap_nwas_abap_sapase_install/ansible_playbook.yml b/all/ansible_sap_nwas_abap_sapase_install/ansible_playbook.yml index 74ef010..90cea5f 100644 --- a/all/ansible_sap_nwas_abap_sapase_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_abap_sapase_install/ansible_playbook.yml @@ -7,6 
+7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_sapase_install set_fact: diff --git a/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_playbook.yml b/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_playbook.yml index 2172421..17fb061 100644 --- a/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_sapmaxdb_install set_fact: diff --git 
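One caveat that applies to every occurrence of the role invocation in these hunks: ansible.builtin.include_role expects its name argument to be a plain string, so the inline mapping form name: { role: community.sap_install.sap_storage_setup } will not resolve to a valid role name at runtime. A working sketch of the task would be:

    - name: Execute Ansible Role sap_storage_setup
      ansible.builtin.include_role:
        name: community.sap_install.sap_storage_setup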
a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_playbook.yml b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_playbook.yml index f01f9b1..185ed77 100644 --- a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_ibmdb2_install set_fact: diff --git a/all/ansible_sap_nwas_java_sapase_install/ansible_playbook.yml b/all/ansible_sap_nwas_java_sapase_install/ansible_playbook.yml index 22ebb0f..ca685f0 100644 --- a/all/ansible_sap_nwas_java_sapase_install/ansible_playbook.yml +++ b/all/ansible_sap_nwas_java_sapase_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute 
Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_sapase_install set_fact: diff --git a/all/ansible_sap_s4hana_install/ansible_playbook.yml b/all/ansible_sap_s4hana_install/ansible_playbook.yml index 65bd68b..e42958e 100644 --- a/all/ansible_sap_s4hana_install/ansible_playbook.yml +++ b/all/ansible_sap_s4hana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_s4hana_install set_fact: diff --git a/all/ansible_sap_s4hana_install_maintplan/ansible_playbook.yml b/all/ansible_sap_s4hana_install_maintplan/ansible_playbook.yml index 9e2b601..9f22a4a 100644 --- a/all/ansible_sap_s4hana_install_maintplan/ansible_playbook.yml +++ b/all/ansible_sap_s4hana_install_maintplan/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) 
%} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + - name: Set fact x86_64 softwarecenter_search_list_s4hana_install set_fact: softwarecenter_search_list_s4hana_install: "{{ softwarecenter_search_list_s4hana_install_x86_64 }}" diff --git a/all/ansible_sap_s4hana_system_copy_hdb/ansible_playbook.yml b/all/ansible_sap_s4hana_system_copy_hdb/ansible_playbook.yml index d3e0126..75599d9 100644 --- a/all/ansible_sap_s4hana_system_copy_hdb/ansible_playbook.yml +++ b/all/ansible_sap_s4hana_system_copy_hdb/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_s4hana_syscopy set_fact: diff --git a/all/ansible_sap_solman_sapase_install/ansible_playbook.yml b/all/ansible_sap_solman_sapase_install/ansible_playbook.yml index 33c433f..3da5724 100644 --- a/all/ansible_sap_solman_sapase_install/ansible_playbook.yml +++ b/all/ansible_sap_solman_sapase_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if 
"disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_sapase_install set_fact: diff --git a/all/ansible_sap_solman_saphana_install/ansible_playbook.yml b/all/ansible_sap_solman_saphana_install/ansible_playbook.yml index 1dc2056..2799a14 100644 --- a/all/ansible_sap_solman_saphana_install/ansible_playbook.yml +++ b/all/ansible_sap_solman_saphana_install/ansible_playbook.yml @@ -7,6 +7,42 @@ # This ensures Ansible Roles, and the tasks within, will be parsed in sequence instead of parsing at Playbook initialisation tasks: + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + # Set facts based on the install dictionary and the default template selected - name: Set fact x86_64 softwarecenter_search_list_nw_saphana_install set_fact: From 64a4c5977bc35f653d9947e9e24ad2319fdd82e1 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Wed, 31 Jan 2024 03:20:52 +0000 Subject: [PATCH 22/25] fix: alter input to multi host playbook --- .../ansible_playbook.yml | 191 ++++++++++++++++++ .../create_ansible_extravars.tf | 2 +- .../module_variables.tf | 2 +- 3 files changed, 193 insertions(+), 2 deletions(-) diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml index d0612b4..ebedb81 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_playbook.yml @@ -66,6 +66,197 @@ # executable: pip3.6 +- name: Ansible Play for storage setup - SAP HANA + hosts: 
hana_primary + become: true + any_errors_fatal: true # https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#aborting-a-play-on-all-hosts + tasks: + + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition[inventory_hostname]['storage_definition'] | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + vars: + sap_storage_setup_sid: "{{ sap_swpm_sid }}" + # hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers + sap_storage_setup_host_type: + - hana_primary + + +- name: Ansible Play for storage setup - SAP NetWeaver ASCS + hosts: nwas_ascs + become: true + any_errors_fatal: true # https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#aborting-a-play-on-all-hosts + tasks: + + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition[inventory_hostname]['storage_definition'] | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + vars: + sap_storage_setup_sid: "{{ sap_swpm_sid }}" + # hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers + sap_storage_setup_host_type: + - 
nwas_abap_ascs + + +- name: Ansible Play for storage setup - SAP NetWeaver PAS + hosts: nwas_pas + become: true + any_errors_fatal: true # https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#aborting-a-play-on-all-hosts + tasks: + + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition[inventory_hostname]['storage_definition'] | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + vars: + sap_storage_setup_sid: "{{ sap_swpm_sid }}" + # hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, nwas_abap_aas, nwas_java_scs, nwas_java_ers + sap_storage_setup_host_type: + - nwas_abap_pas + + +- name: Ansible Play for storage setup - SAP NetWeaver AAS + hosts: nwas_aas + become: true + any_errors_fatal: true # https://docs.ansible.com/ansible/latest/user_guide/playbooks_error_handling.html#aborting-a-play-on-all-hosts + tasks: + + # For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned + # However the sap_storage_setup Ansible Role can not detect disk_count, and requires the key to be renamed lvm_lv_stripes + - name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition + ansible.builtin.set_fact: + sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}" + vars: + converted_element: | + {% set current_element = (convert_item | dict2items) %} + {% set new_element = [] %} + {% for entry in current_element %} + {%- if "disk_count" in entry.key %} + {%- set conv = new_element.extend([ + { + 'key': 'lvm_lv_stripes', + 'value': entry.value, + } + ]) %} + {%- elif not "disk_type" in entry.key %} + {%- set add_entry = new_element.extend([ + { + 'key': entry.key, + 'value': entry.value, + } + ]) %} + {%- endif -%} + {% endfor %} + {{ new_element | items2dict }} + loop: "{{ terraform_host_specification_storage_definition[inventory_hostname]['storage_definition'] | list }}" + loop_control: + loop_var: convert_item + label: "{{ convert_item.name }}" + + - name: Execute Ansible Role sap_storage_setup + include_role: + name: { role: community.sap_install.sap_storage_setup } + vars: + sap_storage_setup_sid: "{{ sap_swpm_sid }}" + # hana_primary, hana_secondary, nwas_abap_ascs, nwas_abap_ers, nwas_abap_pas, 
nwas_abap_aas, nwas_java_scs, nwas_java_ers + sap_storage_setup_host_type: + - nwas_abap_aas + - name: Ansible Play for downloading SAP S/4HANA installation media hosts: nwas_pas diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf b/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf index db7493b..ba31949 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/create_ansible_extravars.tf @@ -656,7 +656,7 @@ sap_update_profile_default_profile_params: # Use Ansible Task to convert JSON (as string) to sap_storage_setup_definition Dictionary -terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition),"\"","\\\"")}' | from_json }}" +terraform_host_specification_storage_definition: "{{ '${replace(jsonencode(var.module_var_terraform_host_specification_storage_definition_all_hosts),"\"","\\\"")}' | from_json }}" EOF } diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf b/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf index b2739a6..70c6435 100644 --- a/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf +++ b/all/ansible_sap_s4hana_distributed_install_maintplan/module_variables.tf @@ -88,6 +88,6 @@ variable "module_var_inventory_nwas_pas" {} variable "module_var_inventory_nwas_aas" {} -variable "module_var_terraform_host_specification_storage_definition" { +variable "module_var_terraform_host_specification_storage_definition_all_hosts" { default = {} } From 7ff5893a1b519f0d42eaa24fa4d43bc193015dbd Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Wed, 31 Jan 2024 10:21:40 +0000 Subject: [PATCH 23/25] fix: gh action error --- .github/workflows/terraform_ansible_software_availability.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/terraform_ansible_software_availability.yml b/.github/workflows/terraform_ansible_software_availability.yml index f3a62f9..57340dd 100644 --- a/.github/workflows/terraform_ansible_software_availability.yml +++ b/.github/workflows/terraform_ansible_software_availability.yml @@ -72,6 +72,7 @@ jobs: echo 'import sys' >> python_module_fuzzy_call_script.py echo 'input_search_file=sys.argv[1]' >> python_module_fuzzy_call_script.py echo 'input_search_file_name_and_version_only=sys.argv[2]' >> python_module_fuzzy_call_script.py + echo 'from module_utils.sap_id_sso import sap_sso_login' >> python_module_fuzzy_call_script.py echo 'from module_utils.sap_launchpad_software_center_download_search_fuzzy import *' >> python_module_fuzzy_call_script.py echo "username='$sap_user_id'" >> python_module_fuzzy_call_script.py echo "password='$sap_user_id_password'" >> python_module_fuzzy_call_script.py From a5a1a738d1394cb877d70e392da37abf4aed3d93 Mon Sep 17 00:00:00 2001 From: sean-freeman <1815807+sean-freeman@users.noreply.github.com> Date: Wed, 31 Jan 2024 10:53:32 +0000 Subject: [PATCH 24/25] fix: requirements --- .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 
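Assembled in order, the echo lines in patch 23 build a small Python helper script on the workflow runner; the portion visible in the hunk reconstructs as follows, with the added import of sap_sso_login (the one-line fix this patch delivers) slotted in before the fuzzy-search import. The username and password values are expanded from the workflow environment at the time the echo lines run:

    import sys
    input_search_file=sys.argv[1]
    input_search_file_name_and_version_only=sys.argv[2]
    from module_utils.sap_id_sso import sap_sso_login
    from module_utils.sap_launchpad_software_center_download_search_fuzzy import *
    username='$sap_user_id'
    password='$sap_user_id_password'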
+++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ .../ansible_requirements_collections.yml | 20 +++++++------------ 21 files changed, 147 insertions(+), 273 deletions(-) diff --git a/all/ansible_sap_bw4hana_install/ansible_requirements_collections.yml b/all/ansible_sap_bw4hana_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_bw4hana_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_bw4hana_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_ecc_hana_install/ansible_requirements_collections.yml b/all/ansible_sap_ecc_hana_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_hana_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_hana_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git 
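Patch 24 swaps the git-sourced community.sap_launchpad and community.sap_operations entries for released Galaxy versions and pins them, alongside the community.sap_install bump from 1.2.3 to 1.3.4. Assuming the file name used throughout the patch, the pinned set is installed with the standard workflow:

    ansible-galaxy collection install -r ansible_requirements_collections.yml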
a/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_requirements_collections.yml b/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_hana_system_copy_hdb/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_ecc_ibmdb2_install/ansible_requirements_collections.yml b/all/ansible_sap_ecc_ibmdb2_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_ibmdb2_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_ibmdb2_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_ecc_oracledb_install/ansible_requirements_collections.yml b/all/ansible_sap_ecc_oracledb_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_oracledb_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_oracledb_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # 
Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_ecc_sapase_install/ansible_requirements_collections.yml b/all/ansible_sap_ecc_sapase_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_sapase_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_sapase_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_ecc_sapmaxdb_install/ansible_requirements_collections.yml b/all/ansible_sap_ecc_sapmaxdb_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_ecc_sapmaxdb_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_ecc_sapmaxdb_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_hana_install/ansible_requirements_collections.yml b/all/ansible_sap_hana_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_hana_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_hana_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - 
name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_abap_hana_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_abap_hana_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_abap_hana_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_abap_hana_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_abap_ibmdb2_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_abap_oracledb_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_abap_oracledb_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_abap_oracledb_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_abap_oracledb_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: 
https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_abap_sapase_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_abap_sapase_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_abap_sapase_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_abap_sapase_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_abap_sapmaxdb_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 + - name: community.sap_launchpad + type: galaxy + version: 1.1.0 + - name: community.sap_operations + type: galaxy + version: 0.9.1 # Collections from public repositories via HTTPS # - name: https://github.com/sap-linuxlab/community.sap_install.git # type: git # version: main - - name: https://github.com/sap-linuxlab/community.sap_launchpad.git - type: git - version: main - - name: https://github.com/sap-linuxlab/community.sap_operations.git - type: git - version: main # Collections from private repositories via use SSH (embedded GitHub PAT does not work) # Used for customised/forked Ansible Collections diff --git a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml index c5c45ba..e221a76 100644 --- a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml +++ b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml @@ -10,24 +10,18 @@ collections: version: 1.33.0 - name: community.sap_install type: galaxy - version: 1.2.3 -# - name: community.sap_launchpad -# type: galaxy -# version: 0.0.0 -# - name: community.sap_operations -# type: galaxy -# version: 0.0.0 + version: 1.3.4 
diff --git a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml
+++ b/all/ansible_sap_nwas_java_ibmdb2_install/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_nwas_java_sapase_install/ansible_requirements_collections.yml b/all/ansible_sap_nwas_java_sapase_install/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_nwas_java_sapase_install/ansible_requirements_collections.yml
+++ b/all/ansible_sap_nwas_java_sapase_install/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_requirements_collections.yml b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_requirements_collections.yml
+++ b/all/ansible_sap_s4hana_distributed_install_maintplan/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_s4hana_install/ansible_requirements_collections.yml b/all/ansible_sap_s4hana_install/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_s4hana_install/ansible_requirements_collections.yml
+++ b/all/ansible_sap_s4hana_install/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_s4hana_install_maintplan/ansible_requirements_collections.yml b/all/ansible_sap_s4hana_install_maintplan/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_s4hana_install_maintplan/ansible_requirements_collections.yml
+++ b/all/ansible_sap_s4hana_install_maintplan/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_s4hana_system_copy_hdb/ansible_requirements_collections.yml b/all/ansible_sap_s4hana_system_copy_hdb/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_s4hana_system_copy_hdb/ansible_requirements_collections.yml
+++ b/all/ansible_sap_s4hana_system_copy_hdb/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
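For contrast, the entries deleted in each hunk use ansible-galaxy's git source form, in which version names a branch, tag, or commit rather than a Galaxy release. A minimal sketch of that form, shown only for illustration since the patch removes it:

    collections:
      - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
        type: git
        version: main

Tracking main floats with the branch head, so switching to fixed Galaxy releases trades freshness for reproducibility; the commented HTTPS examples stay behind as a template, and the SSH route remains reserved for customised/forked collections where an embedded GitHub PAT does not work.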
diff --git a/all/ansible_sap_solman_sapase_install/ansible_requirements_collections.yml b/all/ansible_sap_solman_sapase_install/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_solman_sapase_install/ansible_requirements_collections.yml
+++ b/all/ansible_sap_solman_sapase_install/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections
diff --git a/all/ansible_sap_solman_saphana_install/ansible_requirements_collections.yml b/all/ansible_sap_solman_saphana_install/ansible_requirements_collections.yml
index c5c45ba..e221a76 100644
--- a/all/ansible_sap_solman_saphana_install/ansible_requirements_collections.yml
+++ b/all/ansible_sap_solman_saphana_install/ansible_requirements_collections.yml
@@ -10,24 +10,18 @@ collections:
     version: 1.33.0
   - name: community.sap_install
     type: galaxy
-    version: 1.2.3
-# - name: community.sap_launchpad
-#   type: galaxy
-#   version: 0.0.0
-# - name: community.sap_operations
-#   type: galaxy
-#   version: 0.0.0
+    version: 1.3.4
+  - name: community.sap_launchpad
+    type: galaxy
+    version: 1.1.0
+  - name: community.sap_operations
+    type: galaxy
+    version: 0.9.1
 
 # Collections from public repositories via HTTPS
 # - name: https://github.com/sap-linuxlab/community.sap_install.git
 #   type: git
 #   version: main
-  - name: https://github.com/sap-linuxlab/community.sap_launchpad.git
-    type: git
-    version: main
-  - name: https://github.com/sap-linuxlab/community.sap_operations.git
-    type: git
-    version: main
 
 # Collections from private repositories via use SSH (embedded GitHub PAT does not work)
 # Used for customised/forked Ansible Collections

From f6a86648e9b7454f01daefba5d47c06b9111b168 Mon Sep 17 00:00:00 2001
From: sean-freeman <1815807+sean-freeman@users.noreply.github.com>
Date: Wed, 31 Jan 2024 11:06:11 +0000
Subject: [PATCH 25/25] fix: gh action updates

---
 .../workflows/terraform_ansible_software_availability.yml | 2 +-
 .github/workflows/terraform_validate.yml                  | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/terraform_ansible_software_availability.yml b/.github/workflows/terraform_ansible_software_availability.yml
index 57340dd..070026d 100644
--- a/.github/workflows/terraform_ansible_software_availability.yml
+++ b/.github/workflows/terraform_ansible_software_availability.yml
@@ -23,7 +23,7 @@ jobs:
       terraform_module_ansible: [ansible_sap_bw4hana_install, ansible_sap_ecc_hana_install, ansible_sap_ecc_hana_system_copy_hdb, ansible_sap_ecc_ibmdb2_install, ansible_sap_ecc_oracledb_install, ansible_sap_ecc_sapase_install, ansible_sap_ecc_sapmaxdb_install, ansible_sap_hana_install, ansible_sap_nwas_abap_hana_install, ansible_sap_nwas_abap_ibmdb2_install, ansible_sap_nwas_abap_oracledb_install, ansible_sap_nwas_abap_sapase_install, ansible_sap_nwas_abap_sapmaxdb_install, ansible_sap_nwas_java_ibmdb2_install, ansible_sap_nwas_java_sapase_install, ansible_sap_s4hana_distributed_install_maintplan, ansible_sap_s4hana_install, ansible_sap_s4hana_install_maintplan, ansible_sap_s4hana_system_copy_hdb, ansible_sap_solman_sapase_install, ansible_sap_solman_saphana_install]
     steps:
     - name: Checkout
-      uses: actions/checkout@v3.1.0
+      uses: actions/checkout@v4
       with:
         path: gh_repo
     - name: Ansible setup
diff --git a/.github/workflows/terraform_validate.yml b/.github/workflows/terraform_validate.yml
index afcbfea..d84f9ae 100644
--- a/.github/workflows/terraform_validate.yml
+++ b/.github/workflows/terraform_validate.yml
@@ -18,13 +18,13 @@ jobs:
       fail-fast: false
       max-parallel: 10
       matrix:
-        terraform_ver: [~1.0.0, ~1.1.0, ~1.2.0, ~1.3.0]
+        terraform_ver: [~1.0.0, ~1.1.0, ~1.2.0, ~1.3.0, ~1.4.0, =1.5.5]
         terraform_module_parent: [all, aws_ec2_instance, gcp_ce_vm, ibmcloud_vs, ibmcloud_powervs, ibmpowervc, msazure_vm, vmware_vm]
     steps:
     - name: Checkout
-      uses: actions/checkout@v3.1.0
+      uses: actions/checkout@v4
     - name: Setup Terraform
-      uses: hashicorp/setup-terraform@v2.0.3
+      uses: hashicorp/setup-terraform@v3.0.0
       with:
         terraform_version: ${{ matrix.terraform_ver }}
    - name: Terraform Init
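Both workflow edits in this final patch are major-version bumps of the actions themselves (actions/checkout v3.1.0 to v4, hashicorp/setup-terraform v2.0.3 to v3.0.0, releases which run on the Node 20 runtime), and the validate matrix grows to cover Terraform ~1.4.0 plus an exact =1.5.5 pin. setup-terraform interprets these values as semver ranges, so ~1.4.0 resolves to the newest 1.4.x release while =1.5.5 installs exactly 1.5.5. Below is a minimal sketch of how such a matrix feeds setup-terraform; this is a hypothetical standalone job for illustration, not the repository's actual workflow, and the job name, runner image, and final run steps are assumptions:

    jobs:
      terraform_validate:            # hypothetical job name
        runs-on: ubuntu-latest       # assumed runner image
        strategy:
          fail-fast: false
          matrix:
            terraform_ver: [~1.0.0, ~1.1.0, ~1.2.0, ~1.3.0, ~1.4.0, =1.5.5]
        steps:
          - uses: actions/checkout@v4
          - uses: hashicorp/setup-terraform@v3.0.0
            with:
              # setup-terraform resolves this input as a semver range:
              # ~1.4.0 -> newest 1.4.x, =1.5.5 -> exactly 1.5.5
              terraform_version: ${{ matrix.terraform_ver }}
          # assumed validate-only steps; -backend=false skips backend setup
          - run: terraform init -backend=false
          - run: terraform validate

Running each constraint as a separate matrix job surfaces version-specific HCL incompatibilities early, which is presumably why the matrix tracks every supported minor series rather than only the latest release.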