# Script to convert HyperV datasync image to Azure
# Runs on Amazon Linux 2 x86_64 only

-set -e
-
-while getopts ":d:l:r:v:g:n:s:" opt; do
-    case $opt in
-        d) deployment_type="$OPTARG"
-        ;;
-        l) location="$OPTARG"
-        ;;
-        r) resource_group="$OPTARG"
-        ;;
-        v) vm_name="$OPTARG"
-        ;;
-        g) vnet_rg="$OPTARG"
-        ;;
-        n) vnet_name="$OPTARG"
-        ;;
-        s) subnet_name="$OPTARG"
-        ;;
-        \?) echo "Invalid option -$OPTARG" >&2
-        exit 1
-        ;;
-    esac
-
-    case $OPTARG in
-        -*) echo "Option $opt needs a valid argument"
-        exit 1
-        ;;
-    esac
-done
+set -euo pipefail
+
+# Color definitions for logs
+YELLOW='\033[0;33m'
+CYAN='\033[0;36m'
+GREEN='\033[0;32m'
+RED='\033[0;31m'
+RESET='\033[0m'
+
+# Log warning messages
+function log_warning() {
+    echo -e "${YELLOW}[WARNING] $1${RESET}"
+}

-shift $((OPTIND -1))
+# Log error messages
+function log_error() {
+    echo -e "${RED}[ERROR] $1${RESET}"
+}

-# Check if deployment type is "new_vnet" or "existing_vnet"
-if [ "$deployment_type" != "new_vnet" ] && [ "$deployment_type" != "existing_vnet" ]; then
-    echo "Invalid value for deployment type (-d): $deployment_type. Deployment type must be 'new_vnet' or 'existing_vnet'."
-    exit 1
-fi
+# Log informational messages
+function log_info() {
+    echo -e "${CYAN}[INFO] $1${RESET}"
+}

-# Mandatory parameters deployment_type, location, resource_group and vm_name
-if [ -z "$deployment_type" ] || [ -z "$location" ] || [ -z "$resource_group" ] || [ -z "$vm_name" ]; then
-    echo "Required parameters are missing. Usage: -d [new_vnet|existing_vnet] -l [location] -r [resource_group] -v [vm_name] [-g [vnet_rg]] [-n [vnet_name]] [-s [subnet_name]]"
-    exit 1
-fi
+# Log success messages
+function log_success() {
+    echo -e "${GREEN}[SUCCESS] $1${RESET}"
+}

+# Display help information
+function show_help() {
+    echo -e "${CYAN}AWS DataSync Deployment for Azure${RESET}"
+    echo -e "For more details, visit: ${GREEN}https://github.com/aws-samples/aws-datasync-deploy-agent-azure${RESET}"
+    echo
+    echo -e "${CYAN}Usage:${RESET} $0 [options]"
+    echo
+    echo -e "${CYAN}Options:${RESET}"
+    echo "  -d <deployment_type>  Deployment type ('new_vnet' or 'existing_vnet')"
+    echo "  -l <location>         Azure region (e.g., 'eastus', 'westus')"
+    echo "  -r <resource_group>   Azure resource group name"
+    echo "  -v <vm_name>          Azure VM name"
+    echo "  -z <vm_size>          Azure VM size (e.g., 'Standard_E4s_v3', 'Standard_E16_v5')"
+    echo "  -g <vnet_rg>          Virtual network resource group (required for 'existing_vnet')"
+    echo "  -n <vnet_name>        Virtual network name (required for 'existing_vnet')"
+    echo "  -s <subnet_name>      Subnet name (required for 'existing_vnet')"
+    echo "  -h                    Show this help message"
+    echo
+    echo -e "${CYAN}Examples:${RESET}"
+    echo "  $0 -d new_vnet -l eastus -r myResourceGroup -v myVM -z Standard_E4s_v3"
+    echo "  $0 -d existing_vnet -l eastus -r myResourceGroup -v myVM -g myVnetRG -n myVnet -s mySubnet -z Standard_E16_v5"
+    exit 0
+}

+# Validate inputs to ensure required parameters are provided
+function validate_inputs() {
+    log_info "Validating input parameters..."
+    if [ -z "${deployment_type:-}" ] || [ -z "${location:-}" ] || [ -z "${resource_group:-}" ] || [ -z "${vm_name:-}" ] || [ -z "${vm_size:-}" ]; then
+        log_error "Missing required parameters. Use -h for help."
+        exit 1
+    fi

-# Check if deployment_type is existing_vnet, then vnet_rg, vnet_name, and subnet_name are mandatory
-if [ "$deployment_type" == "existing_vnet" ] && { [ -z "$vnet_rg" ] || [ -z "$vnet_name" ] || [ -z "$subnet_name" ]; }; then
-    echo "-g [vnet_rg] -n [vnet_name] -s [subnet_name] are mandatory when deployment_type [-d] is 'existing_vnet'."
-    exit 1
-fi
+    if [[ "$deployment_type" != "new_vnet" && "$deployment_type" != "existing_vnet" ]]; then
+        log_error "Invalid deployment type (-d). Must be 'new_vnet' or 'existing_vnet'."
+        exit 1
+    fi

-#if [ $# -ne 12 ]; then
-#    echo "Missing -l or -r or -v or -g or -n or -s"
-#    exit 1
-#fi
+    if [ "$deployment_type" == "existing_vnet" ] && { [ -z "${vnet_rg:-}" ] || [ -z "${vnet_name:-}" ] || [ -z "${subnet_name:-}" ]; }; then
+        log_error "-g, -n, and -s are mandatory when deployment type is 'existing_vnet'."
+        exit 1
+    fi
+}

+# Perform system pre-checks to ensure the script runs correctly
+function pre_checks() {
+    log_info "Performing system pre-checks..."
+    if [ "$(uname -m)" != "x86_64" ]; then
+        log_error "This script only supports x86_64 architecture."
+        exit 1
+    fi

-# Exiting if not running on X86_64 architecture
-arch=$(uname -m)||(arch)
-if [ "$arch" != "x86_64" ]; then
-    echo "This script runs only on x86_64 architecture"
-    exit 1
-fi
+    if [ ! -w "/tmp" ]; then
+        log_error "The /tmp directory is not writable."
+        exit 1
+    fi
+}

-# Exiting if /tmp is not writable
-if [ ! -w "/tmp" ]; then
-    echo "Unable to write to /tmp. exiting"
-    exit 1
-fi
+# Clean up conflicting files in /tmp
+function cleanup_tmp() {
+    log_info "Cleaning up /tmp directory to avoid conflicts..."
+    rm -f /tmp/azcopy.tar.gz /tmp/datasync.zip
+    rm -rf /tmp/azcopy_linux_amd64*
+    rm -f /tmp/*.vhdx /tmp/*.raw /tmp/*.vhd
+    log_success "Temporary files removed."
+}

-echo -e "\033[0;33mArgument deployment type is $deployment_type\033[0m"
-echo -e "\033[0;33mArgument location is $location\033[0m"
-echo -e "\033[0;33mArgument vm resource group is $resource_group\033[0m"
-echo -e "\033[0;33mArgument vm name is $vm_name\033[0m"
-echo -e "\033[0;33mArgument vnet resource group is $vnet_rg\033[0m"
-echo -e "\033[0;33mArgument vnet name is $vnet_name\033[0m"
-echo -e "\033[0;33mArgument subnet name is $subnet_name\033[0m"
-
-AZCOPY_VERSION=v10
-
-function download_and_install_dependencies () {
-    echo -e "[azure-cli]
-name=Azure CLI
-baseurl=https://packages.microsoft.com/yumrepos/azure-cli
-enabled=1
-gpgcheck=1
-gpgkey=https://packages.microsoft.com/keys/microsoft.asc" | tee /etc/yum.repos.d/azure-cli.repo > /dev/null
-    yum -y -q install qemu-img jq unzip azure-cli && yum -y clean all && rm -rf /var/cache
-    echo "\033[0;33mWarning azcopy will be downloaded from Internet but there is no integrity hash available.\033[0;33m"
-    curl -Ls "https://aka.ms/downloadazcopy-$AZCOPY_VERSION-linux" -o /tmp/azcopy.tar.gz
-    tar xzf /tmp/azcopy.tar.gz --directory /tmp || { echo "AzCopy download or extraction failed"; exit 1; }
-    cp /tmp/azcopy_linux_amd64*/azcopy /usr/bin/azcopy
+# Install required dependencies
+function setup_dependencies() {
+    log_info "Installing required dependencies..."
+    echo "[azure-cli]" | tee /etc/yum.repos.d/azure-cli.repo > /dev/null
+    echo "name=Azure CLI" | tee -a /etc/yum.repos.d/azure-cli.repo > /dev/null
+    echo "baseurl=https://packages.microsoft.com/yumrepos/azure-cli" | tee -a /etc/yum.repos.d/azure-cli.repo > /dev/null
+    echo "enabled=1" | tee -a /etc/yum.repos.d/azure-cli.repo > /dev/null
+    echo "gpgcheck=1" | tee -a /etc/yum.repos.d/azure-cli.repo > /dev/null
+    echo "gpgkey=https://packages.microsoft.com/keys/microsoft.asc" | tee -a /etc/yum.repos.d/azure-cli.repo > /dev/null
+    yum install -y qemu-img jq unzip azure-cli || {
+        log_error "Failed to install dependencies."
+        exit 1
+    }
+
+    log_info "Downloading AzCopy..."
+    curl -Ls "https://aka.ms/downloadazcopy-v10-linux" -o /tmp/azcopy.tar.gz
+    tar -xf /tmp/azcopy.tar.gz -C /tmp || {
+        log_error "Failed to download or extract AzCopy."
+        exit 1
+    }
+    mv /tmp/azcopy_linux_amd64*/azcopy /usr/bin/
    chmod +x /usr/bin/azcopy
+    log_success "Dependencies installed successfully."
}

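For reference, the six `echo ... | tee` calls in the new setup_dependencies assemble /etc/yum.repos.d/azure-cli.repo with the contents below, the same repo definition the removed multi-line echo wrote in one shot:

    [azure-cli]
    name=Azure CLI
    baseurl=https://packages.microsoft.com/yumrepos/azure-cli
    enabled=1
    gpgcheck=1
    gpgkey=https://packages.microsoft.com/keys/microsoft.asc
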
-function download_datasync(){
-    echo -e "\033[0;33mDownloading datasync agent for Hyper-V. There is no integrity hash available\033[0;33m"
-    curl -s https://d8vjazrbkazun.cloudfront.net/AWS-DataSync-Agent-HyperV.zip -o /tmp/datasync.zip
+# Download the AWS DataSync agent
+function download_datasync() {
+    log_info "Downloading AWS DataSync agent for Hyper-V..."
+    curl -s https://d8vjazrbkazun.cloudfront.net/AWS-DataSync-Agent-HyperV.zip -o /tmp/datasync.zip || {
+        log_error "Failed to download AWS DataSync agent."
+        exit 1
+    }
+    log_success "AWS DataSync agent downloaded successfully."
}

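The agent archive is still fetched without a published checksum (the old script printed a warning to that effect). If stronger validation were wanted, one lightweight addition — shown here only as a sketch, not part of the commit — would be to test the zip before unpacking it:

    # Hypothetical extra check: confirm the download is a complete, readable zip archive
    unzip -t /tmp/datasync.zip > /dev/null || {
        log_error "Downloaded DataSync agent archive appears to be corrupt."
        exit 1
    }
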
-function convert_datasync(){
-    echo -e "\033[0;33mConverting datasync to vhd\033[0;33m"
-    unzip /tmp/datasync.zip -d /tmp || { echo "AWS DataSync Agent download or extraction failed"; exit 1; }
-    vhdxdisk=$(find aws-*)
+# Convert the downloaded DataSync agent to a VHD format
+function convert_datasync() {
+    log_info "Converting AWS DataSync agent to VHD format..."
+    unzip /tmp/datasync.zip -d /tmp || {
+        log_error "Failed to extract AWS DataSync agent."
+        exit 1
+    }
+
+    vhdxdisk=$(find /tmp -name '*.vhdx' | head -n 1)
    rawdisk=${vhdxdisk//vhdx/raw}
    vhddisk=${vhdxdisk//vhdx/vhd}

    qemu-img convert -f vhdx -O raw "$vhdxdisk" "$rawdisk"

-    MB=$((1024*1024))
-    size=$(qemu-img info -f raw --output json "$rawdisk" | jq -r '.["virtual-size"]')
-    rounded_size=$((((size+MB-1)/MB)*MB))
-    echo "Rounded Size = $rounded_size"
+    MB=$((1024 * 1024))
+    size=$(qemu-img info -f raw --output json "$rawdisk" | jq -r '."virtual-size"')
+    rounded_size=$((((size + MB - 1) / MB) * MB))
+
    qemu-img resize "$rawdisk" "$rounded_size"
    qemu-img convert -f raw -o subformat=fixed,force_size -O vpc "$rawdisk" "$vhddisk"

    rm "$rawdisk"
-    disk_name=${vhddisk//\.xfs\.gpt\.vhd}
-    upload_size=$(qemu-img info --output json "$vhddisk" | jq -r '.["virtual-size"]')
+    disk_name=$(basename "$vhddisk" .vhd)
+    upload_size=$(qemu-img info --output json "$vhddisk" | jq -r '."virtual-size"')
+    log_success "DataSync agent converted to VHD successfully."
}

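A note on the resize step above: Azure expects an uploaded VHD's virtual size to be aligned to 1 MiB, so the raw image is rounded up to the next MiB boundary before the fixed-size VPC conversion. A minimal illustration of the arithmetic, with a made-up size:

    MB=$((1024 * 1024))
    size=10000000                              # e.g. a 10,000,000-byte raw image
    echo $(( ((size + MB - 1) / MB) * MB ))    # prints 10485760, i.e. rounded up to 10 MiB
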
-function check_resource_group(){
-    group_exists=$(az group exists --resource-group "$resource_group")
-    if [ "$group_exists" = "false" ]; then
-        az group create --location "$location" --resource-group "$resource_group" --only-show-errors || { echo "An error occurred while creating the resource group"; exit 1; }
+# Check if the specified resource group exists, create it if not
+function check_resource_group() {
+    log_info "Checking Azure resource group: $resource_group..."
+    if [ "$(az group exists --name "$resource_group")" == "false" ]; then
+        az group create --location "$location" --resource-group "$resource_group" --only-show-errors || {
+            log_error "Failed to create resource group."
+            exit 1
+        }
    fi
+    log_success "Azure resource group is ready."
}

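One detail behind the comparison above: `az group exists` writes the literal string true or false to stdout, which is why the script captures and compares the output as a string rather than relying on an exit code. For example (the resource group name is illustrative):

    az group exists --name myResourceGroup    # prints "true" or "false"
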
-function upload_to_azure(){
-    echo -e "\033[0;33mUploading to Azure\033[0;33m"
-    az login --use-device-code
+# Upload the converted VHD to Azure
+function upload_to_azure() {
+    log_info "Uploading VHD to Azure..."
+    az login || {
+        log_error "Failed to login to Azure."
+        exit 1
+    }
+
    check_resource_group
-    # shellcheck disable=SC2086
-    # az disk create does not accept a string for upload-size-bytes
-    az disk create -n "$disk_name" -g "$resource_group" -l "$location" --os-type Linux --upload-type Upload --upload-size-bytes $upload_size --sku standard_lrs --output none --only-show-errors || { echo "An error occured while creating the Azure Disk"; exit 1; }
-    sas_uri=$(az disk grant-access -n "$disk_name" -g "$resource_group" --access-level Write --duration-in-seconds 86400 | jq -r '.accessSas') || { echo "An error occurred while granting SAS access"; exit 1; }
-    azcopy copy "$vhddisk" "$sas_uri" --blob-type PageBlob || { echo "An error occurred while uploading the Azure Disk"; exit 1; }
-    az disk revoke-access -n "$disk_name" -g "$resource_group" || { echo "An error occurred while revoking SAS access"; exit 1; }
-}

-function create_azure_vm(){
-    echo -e "\033[0;33mCreating Azure Virtual Machine for DataSync with a new vnet\033[0;33m"
-    az vm create -g "$resource_group" -l "$location" --name "$vm_name" --size Standard_E4as_v5 --os-type linux --attach-os-disk "$disk_name" --public-ip-address "" --only-show-errors || { echo "An error occured while creating the Azure VM"; exit 1; }
-}
+    az disk create -n "$disk_name" -g "$resource_group" -l "$location" --os-type Linux --upload-type Upload --upload-size-bytes "$upload_size" --sku Standard_LRS --only-show-errors || {
+        log_error "Failed to create Azure disk."
+        exit 1
+    }
+
+    sas_uri=$(az disk grant-access -n "$disk_name" -g "$resource_group" --access-level Write --duration-in-seconds 86400 | jq -r '.accessSas') || {
+        log_error "Failed to grant SAS access."
+        exit 1
+    }

-function create_azure_vm_existing_vnet(){
-    echo -e "\033[0;33mCreating Azure Virtual Machine for DataSync with an existing vnet\033[0;33m"
-    az vm create -g "$resource_group" -l "$location" --name "$vm_name" --size Standard_E4as_v5 --os-type linux --attach-os-disk "$disk_name" --subnet "$(az network vnet subnet show --resource-group $vnet_rg --vnet-name $vnet_name --name $subnet_name -o tsv --query id)" --public-ip-address "" --only-show-errors || { echo "An error occured while creating the Azure VM"; exit 1; }
+    azcopy copy "$vhddisk" "$sas_uri" --blob-type PageBlob || {
+        log_error "Failed to upload VHD to Azure."
+        exit 1
+    }
+
+    az disk revoke-access -n "$disk_name" -g "$resource_group"
+    log_success "VHD uploaded to Azure successfully."
}

-function cleanup(){
-    rm -f /tmp/datasync.zip
-    rm -rf /tmp/aws-datasync-*
-    rm -rf /tmp/azcopy*
-    az logout || true
-    echo -e "\033[0m"
+# Create a new Azure VM using the uploaded VHD
+function create_vm() {
+    log_info "Creating Azure VM: $vm_name..."
+    if [ "$deployment_type" == "new_vnet" ]; then
+        az vm create -g "$resource_group" -l "$location" --name "$vm_name" --size "$vm_size" --os-type Linux --attach-os-disk "$disk_name" --public-ip-address "" --only-show-errors || {
+            log_error "Failed to create Azure VM."
+            exit 1
+        }
+    else
+        subnet_id=$(az network vnet subnet show --resource-group "$vnet_rg" --vnet-name "$vnet_name" --name "$subnet_name" -o tsv --query id)
+        az vm create -g "$resource_group" -l "$location" --name "$vm_name" --size "$vm_size" --os-type Linux --attach-os-disk "$disk_name" --subnet "$subnet_id" --public-ip-address "" --only-show-errors || {
+            log_error "Failed to create Azure VM."
+            exit 1
+        }
+    fi
+    log_success "Azure VM created successfully."
}

-pushd /tmp
-cleanup
-download_and_install_dependencies
+# Display help if no arguments are provided
+if [ "$#" -eq 0 ]; then
+    show_help
+fi
+
+# Parse command-line arguments
+while getopts ":d:l:r:v:g:n:s:z:h" opt; do
+    case $opt in
+        d) deployment_type="$OPTARG" ;;
+        l) location="$OPTARG" ;;
+        r) resource_group="$OPTARG" ;;
+        v) vm_name="$OPTARG" ;;
+        g) vnet_rg="$OPTARG" ;;
+        n) vnet_name="$OPTARG" ;;
+        s) subnet_name="$OPTARG" ;;
+        z) vm_size="$OPTARG" ;;
+        h) show_help ;;
+        *)
+            log_error "Invalid option: -$OPTARG"
+            exit 1
+            ;;
+    esac
+
+done
+
+# Execute the main workflow
+validate_inputs
+pre_checks
+cleanup_tmp
+setup_dependencies
download_datasync
convert_datasync
upload_to_azure
-popd
-
-if [ "$deployment_type" == "new_vnet" ]; then
-    echo "Deployment type is new_vnet"
-    create_azure_vm
-elif [ "$deployment_type" == "existing_vnet" ]; then
-    echo "Deployment type is existing_vnet"
-    create_azure_vm_existing_vnet
-fi
-cleanup
+create_vm
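Because the rewritten script installs packages with yum and writes to /etc/yum.repos.d and /usr/bin, it needs to run as root on the Amazon Linux 2 build host. A typical end-to-end invocation, mirroring the help text (the file name is illustrative):

    sudo ./datasync-to-azure.sh -d new_vnet -l eastus -r myResourceGroup -v myVM -z Standard_E4s_v3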