# config.ini - HOL-2703 - VCF-Memory Tiering Lab
# Version 1.0 2026-03-30
# Changes:
# - Added cross-references to code consumers for every config key
# Author - Burke Azbill and HOL Core Team
#
# This file is copied to /tmp/config.ini by labstartup.sh
# Section and option names are case-sensitive
#
# Cross-reference key:
# Startup modules: Startup/prelim.py, ESXi.py, vSphere.py, VCF.py,
# services.py, Kubernetes.py, VCFfinal.py, final.py,
# pings.py, urls.py, odyssey.py
# Shutdown modules: Shutdown/Shutdown.py, VCFshutdown.py, fleet.py
# Tools: Tools/vpodchecker.py, confighol-9.0.py, confighol-9.1.py,
# generate_labdetails.py, offline-ready.py, tdns_import.py,
# labtypes.py
#==============================================================================
# [VPOD] - Lab identity and global settings
# Read by: Startup/prelim.py, final.py, lsfunctions.py (init)
#==============================================================================
[VPOD]
# Lab SKU - must match the repository name
# Read by: lsfunctions.py init(), prelim.py, Tools/generate_labdetails.py
vPod_SKU = HOL-BADSKU
# Lab type determines the startup sequence and features
# Valid values: HOL, DISCOVERY, VXP, ATE, EDU
# Read by: lsfunctions.py init(), Shutdown/Shutdown.py (selects VCF shutdown path),
# Tools/labtypes.py, Tools/generate_labdetails.py
labtype = HOL
# Whether to lock holuser account in production (security)
# Read by: Tools/offline-ready.py (sets to false for offline prep)
# NOTE: Startup/final.py uses holuser_lock (below), not lockholuser
lockholuser = true
# Maximum minutes before lab startup fails
# Read by: lsfunctions.py init() (startup timeout watchdog)
maxminutes = 60
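# Illustrative only (not part of the config): a minimal sketch of how a startup
# module could read the scalar [VPOD] keys above with Python's configparser.
# The variable names are assumptions, not the actual lsfunctions.py code:
#   import configparser
#   cfg = configparser.ConfigParser()
#   cfg.read('/tmp/config.ini')
#   sku = cfg.get('VPOD', 'vPod_SKU')                           # e.g. 'HOL-BADSKU'
#   labtype = cfg.get('VPOD', 'labtype', fallback='HOL')
#   maxminutes = cfg.getint('VPOD', 'maxminutes', fallback=60)  # startup watchdog limit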
# Enable Odyssey client installation (VLP feature)
# Read by: Startup/odyssey.py
odyssey = false
# Interval for labcheck runs (minutes, 0 = disabled)
# UNUSED - not currently consumed by any script
# See labcheck_enabled below for the boolean toggle
# labcheckinterval = 15
# Custom conky title (if not specified, uses vPod_SKU)
# Read by: lsfunctions.py (desktop widget display)
# conky_title = NOT APPLICABLE TO HOL LABS
# Oh-My-Posh Theme Toggle -- Learn more about oh-my-posh at https://ohmyposh.dev
# The lab console and manager have oh-my-posh installed, but it is disabled by default.
# The theme is holoconsole-notime.omp.json (saved locally as holoconsole.omp.json) from
# https://github.com/burkeazbill/DimensionQuestDotFiles/blob/main/OhMyPosh/README.md
# If true, the holoconsole.omp.json theme is applied; if false, the default prompt theme is used.
# Read by: .bashrc on manager and console VMs
#ohmyposh_enabled = true
# Enable VS Code proxy configuration on the console VM (default: true)
# Requires allowlist entries:
# VS Code Marketplace (required for extension browsing/install)
# marketplace.visualstudio.com
# visualstudio.com
# vsassets.io
# vscode-unpkg.net
# vscode-cdn.net
# vscode.dev
# When true, configures http.proxy and http.noProxy in VS Code settings
# for the holuser account on the console, enabling Marketplace access
# through the holorouter Squid proxy while bypassing internal subnets
# Read by: Startup/prelim.py
enablevscodeproxy = false
# Enable/disable labcheck periodic runs (default: true)
# Read by: Startup/final.py
#labcheck_enabled = true
# Enable/disable holuser account lock in production (default: follows lockholuser)
# Read by: Startup/final.py
#holuser_lock = true
# DNS record import format: zone,name,type,value
# This only applies once a Technitium DNS holorouter is incorporated into the pod
# Multiple records separated by newlines (with indentation) or semicolons
# Read by: Tools/tdns_import.py
# Uncomment and add records as needed:
# new-dns-records = site-a.vcf.lab,gitlab,A,10.1.10.211
# site-a.vcf.lab,harbor,A,10.1.10.212
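# Illustrative only: one way the zone,name,type,value record format above could be
# split into tuples; the actual Tools/tdns_import.py logic may differ:
#   raw = cfg.get('VPOD', 'new-dns-records', fallback='')
#   records = []
#   for chunk in raw.replace(';', '\n').splitlines():
#       chunk = chunk.strip()
#       if chunk:
#           zone, name, rtype, value = [f.strip() for f in chunk.split(',')]
#           records.append((zone, name, rtype, value))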
#==============================================================================
# [RESOURCES] - Components verified/started during Startup
# Read by: Startup/ESXi.py, vSphere.py, services.py, pings.py, urls.py,
# Kubernetes.py, final.py, VCFfinal.py
# Shutdown: Shutdown/VCFshutdown.py reads vCenters in Phase 1b (fallback),
# Phase 2 (main connect), Phase 3b/4 (SSO lookup), Phase 7 (mgmt reconnect)
#==============================================================================
[RESOURCES]
# ESXi hosts to verify SSH (port 22) and maintenance mode at startup
# Format: hostname:maintenance_mode_on_off (yes=enter MM, no=exit MM)
# Startup: ESXi.py checks SSH, vSphere.py manages maintenance mode
# Shutdown: NOT used directly; ESXi hosts come from [VCF] vcfmgmtcluster
# Tools: vpodchecker.py, confighol-9.0.py, confighol-9.1.py
ESXiHosts = esx-01a.site-a.vcf.lab:no
    esx-02a.site-a.vcf.lab:no
    esx-03a.site-a.vcf.lab:no
    esx-04a.site-a.vcf.lab:no
    esx-05a.site-a.vcf.lab:no
    esx-06a.site-a.vcf.lab:no
    esx-07a.site-a.vcf.lab:no
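# Illustrative only: a sketch of splitting the multi-line ESXiHosts value above into
# (hostname, enter_maintenance_mode) pairs; the actual ESXi.py/vSphere.py code may differ:
#   hosts = []
#   for line in cfg.get('RESOURCES', 'ESXiHosts').splitlines():
#       if line.strip():
#           name, mm_flag = line.strip().split(':')
#           hosts.append((name, mm_flag == 'yes'))   # yes = enter MM, no = exit MM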
# vCenters to connect to during startup
# Format: hostname:type:sso_user (type: linux, windows, esx)
# Startup: vSphere.py connects, checks datastores/clusters/VMs
# Shutdown: VCFshutdown.py Phase 1b (fallback vCenter connect), Phase 2 (main connect),
# Phase 3b/4 (SSO user lookup), Phase 7 (mgmt vCenter reconnect)
# Tools: vpodchecker.py, confighol-9.0.py, confighol-9.1.py, VCFfinal.py
vCenters = vc-mgmt-a.site-a.vcf.lab:linux:administrator@vsphere.local
    vc-wld01-a.site-a.vcf.lab:linux:administrator@wld.sso
#vc-mgmt-b.site-b.vcf.lab:linux:administrator@vsphere2.local
# Datastores to verify (hosts rescanned if missing)
# Format: VSAN:datastore_name or storage_host:datastore_name
# Startup: vSphere.py
Datastores = VSAN:vsan-mgmt-01a
#stg-01a.site-a.vcf.lab:ISCSI01-COMP01B
#VSAN:RegionA01-VSAN-COMP01A
#VSAN:RegionB01-VSAN-COMP01B
# DRS configuration at startup
# Format: clustername:on|off
# Startup: vSphere.py
Clusters = cluster-mgmt-01a:off
    cluster-wld01-01a:off
#RegionB01-COMP01B:off
# Nested VMs to power on during startup
# Format: vmname:vcenter_or_esxhost (supports regex patterns)
# Startup: vSphere.py powers on in listed order
# Shutdown: NOT used; VMs shut down by name in VCFshutdown.py Phases 8-16
# Tools: confighol-9.0.py, confighol-9.1.py
VMs = sddcmanager-a:vc-mgmt-a.site-a.vcf.lab
    vsp-01a-.*:vc-mgmt-a.site-a.vcf.lab
    ops.*:vc-mgmt-a.site-a.vcf.lab
# vApps to power on during startup
# Format: vappname:vcenter
# Startup: vSphere.py
#vApps = YourvApp:vc-mgmt-01a.site-a.vcf.lab
# YourOthervApp:vc-mgmt-01a.site-a.vcf.lab
# IP addresses to ping-check during startup and final verification
# Startup: pings.py, final.py
Pings = 10.1.10.129
    10.1.10.131
# Windows services to verify/start
# Format: server:service:passwd:waitsec
# Startup: services.py (legacy; no current Python consumer)
#WindowsServices = server:service:passwd:waitsec
# Site A SRM embedded database
#srm-01a.site-a.vcf.lab:vmware-dr-vpostgres::10
# Site A SRM server
#srm-01a.site-a.vcf.lab:vmware-dr::10
# Site B SRM embedded database
#srm-01b.site-b.vcf.lab:vmware-dr-vpostgres::10
# Site B SRM server
#srm-01b.site-b.vcf.lab:vmware-dr::10
# Linux services to verify/start
# Format: server:service:passwd:waitsec
# Startup: services.py
#LinuxServices = server:service:passwd:waitsec
# For vSphere 6.7 and 7.x, uncomment the vcsa-01x lines as needed
# For vSphere 8.x, leave the vcsa-01x lines commented out
# example to check vSphere ui service (site A)
#vc-mgmt-01a.site-a.vcf.lab:vsphere-ui::5
# example to check vSphere ui service (site B)
#vc-mgmt-01b.site-b.vcf.lab:vsphere-ui::5
# TCP ports to verify during startup
# Format: hostname:port
# Startup: services.py
TCPServices = vc-mgmt-a.site-a.vcf.lab:443
    vc-wld01-a.site-a.vcf.lab:443
#vc-mgmt-b.site-b.vcf.lab:443
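# Illustrative only: a minimal TCP reachability check for the host:port entries above;
# services.py may implement this differently:
#   import socket
#   for entry in cfg.get('RESOURCES', 'TCPServices').splitlines():
#       host, port = entry.strip().rsplit(':', 1)
#       try:
#           socket.create_connection((host, int(port)), timeout=5).close()
#           print(f'{host}:{port} reachable')
#       except OSError:
#           print(f'{host}:{port} NOT reachable')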
# Kubernetes certificate renewal
# Format: host:account:password:renewal_command
# Startup: Kubernetes.py
#Kubernetes = k8s-master.site-a.vcf.lab:root:******:kubeadm certs renew all
#k8s-master.site-a.vcf.lab:root:******:kubeadm alpha certs renew all
# URLs to check for expected response text
# Format: url,expected_text (200 status checked if no text specified)
# Startup: urls.py, final.py
# Tools: generate_labdetails.py, offline-ready.py
URLS = https://www.vmware.com/,VMware
# VCF 9 base templates
    https://vc-mgmt-a.site-a.vcf.lab/ui/,loading-container
    https://vc-mgmt-a.site-a.vcf.lab:5480/#/login,VMware vCenter Management
    https://vc-wld01-a.site-a.vcf.lab/ui/,loading-container
    https://vc-wld01-a.site-a.vcf.lab:5480/#/login,VMware vCenter Management
    https://ops-a.site-a.vcf.lab/ui/,Operations
    https://sddcmanager-a.site-a.vcf.lab/ui/,SDDC Manager
#https://vc-mgmt-b.site-b.vcf.lab/ui/,loading-container
#http://stg-01a.site-a.vcf.lab/account/login,TrueNAS
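# Illustrative only: a sketch of the url,expected_text check pattern described above,
# using the third-party requests library (an assumption; urls.py may do this differently):
#   import requests
#   for entry in cfg.get('RESOURCES', 'URLS').splitlines():
#       url, _, expected = entry.strip().partition(',')
#       resp = requests.get(url, verify=False, timeout=30)
#       ok = resp.status_code == 200 and (not expected or expected in resp.text)
#       print(f'{url}: {"OK" if ok else "FAILED"}')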
#==============================================================================
# [VCF] - VCF infrastructure components (ESXi, NSX, vCenter)
# Startup: Startup/VCF.py (Phases 1-5: connect hosts, check datastores,
# start NSX Manager, NSX Edges, post-edge VMs, vCenter)
# Shutdown: Shutdown/VCFshutdown.py reads this section for:
# - vcfmgmtcluster -> Phases 2, 18, 19, 20 (connect, host settings, vSAN, host shutdown)
# - vcfnsxmgr -> Phases 6, 15 (workload/mgmt NSX Manager, filtered by wld/mgmt)
# - vcfnsxedges -> Phases 5, 14 (workload/mgmt NSX Edges, filtered by wld/mgmt)
# - vcfvCenter -> Phases 7, 17 (workload/mgmt vCenter, filtered by wld/mgmt)
# - vspvms -> Phase 19b (VSP platform VM shutdown)
# Tools: confighol-9.0.py, confighol-9.1.py configure SSH on NSX;
# vpodchecker.py health checks
#==============================================================================
[VCF]
# ESXi hosts that bootstrap VCF (management cluster)
# Format: hostname:type (type is always 'esx')
# Startup: VCF.py Phase 1 - connect, exit maintenance mode, enable SSH
# VCFfinal.py - reconnect hosts
# Shutdown: VCFshutdown.py Phase 2 (connect), Phase 18 (host settings),
# Phase 19 (vSAN elevator), Phase 20 (host shutdown)
vcfmgmtcluster = esx-01a.site-a.vcf.lab:esx
    esx-02a.site-a.vcf.lab:esx
    esx-03a.site-a.vcf.lab:esx
    esx-04a.site-a.vcf.lab:esx
    esx-05a.site-a.vcf.lab:esx
    esx-06a.site-a.vcf.lab:esx
    esx-07a.site-a.vcf.lab:esx
# VCF management datastore name (verified during startup)
# Startup: VCF.py Phase 2 - verify datastore accessible
vcfmgmtdatastore = vsan-mgmt-01a
# NSX Manager VMs (VM display names as seen in vSphere)
# Format: nsx-vm-name
# Startup: VCF.py Phase 3 - power on NSX Manager (host-agnostic search)
# Shutdown: VCFshutdown.py Phase 6 (wld in name -> workload domain),
# Phase 15 (mgmt in name -> management domain)
# Tools: confighol-9.0.py, confighol-9.1.py configure SSH on NSX Managers;
# vpodchecker.py health checks
vcfnsxmgr = nsx-mgmt-01a
    nsx-wld01-01a
# NSX Edge / VNA VMs (VM display names as seen in vSphere)
# Format: edge-vm-name
# Startup: VCF.py Phase 4 - power on NSX Edges (host-agnostic search)
# Shutdown: VCFshutdown.py Phase 5 (wld in name -> workload domain),
# Phase 14 (mgmt in name -> management domain)
# Tools: vpodchecker.py, confighol-9.0.py, confighol-9.1.py
# NOTE: vna-* VMs are treated as edges; they have VMware Tools but
# cannot be logged into directly - shutdown uses vSphere API
vcfnsxedges = vna-wld01-01a
    vna-wld01-02a
#edge-mgmt-01a
#edge-mgmt-02a
# Post-Edge VMs - boot immediately after NSX Edges, before vCenter
# Format: vmname or vmname_regex
# Startup: VCF.py Phase 4b - power on (host-agnostic search)
# Shutdown: NOT used by shutdown scripts
vcfpostedgevms = auto-platform-a.*
    license-a
# VCF vCenter VMs (VM display names as seen in vSphere)
# Format: vcenter-vm-name
# Startup: VCF.py Phase 5 - power on vCenter (host-agnostic search)
# Shutdown: VCFshutdown.py Phase 7 (wld in name -> workload domain),
# Phase 17 (mgmt in name -> management domain)
# Tools: confighol-9.1.py
vcfvCenter = vc-mgmt-a
    vc-wld01-a
# VSP Platform VMs (regex pattern)
# Format: vm_regex:vcenter
# Startup: VCFfinal.py Task 2d - power on VSP VMs
# Shutdown: VCFshutdown.py Phase 19b - graceful shutdown before ESXi hosts
vspvms = vsp-01a-.*:vc-mgmt-a.site-a.vcf.lab
# SDDC Manager VM (for vpodchecker health checks)
# Format: vmname:vcenter
# Tools: vpodchecker.py
#sddcmanager = sddcmanager-a:vc-mgmt-a.site-a.vcf.lab
# VCF-specific URLs to check (separate from RESOURCES URLs)
# Format: url,expected_text
# Tools: vpodchecker.py, confighol-9.1.py
#urls = https://sddcmanager-a.site-a.vcf.lab/ui/,SDDC Manager
#==============================================================================
# [VCFFINAL] - Final VCF startup tasks (Tanzu, VCF Automation, VCF Components)
# Startup: Startup/VCFfinal.py (runs after VCF.py completes)
# Shutdown: Shutdown/VCFshutdown.py reads several keys from this section:
# - tanzucontrol -> Phase 3b (workload drain), Phase 3 (stop WCP), Phase 4 (Supervisor VM shutdown)
# - vravms -> Phase 1b (VCF Automation fallback when Fleet API down)
# - vcfcomponents -> Phase 2b (scale down K8s workloads on VSP)
#==============================================================================
[VCFFINAL]
# VCF Component Services on VSP management cluster (K8s workloads)
# Format: namespace:resource_type/resource_name
# Startup: VCFfinal.py Task 2e - kubectl scale --replicas=1 (scale UP)
# Shutdown: VCFshutdown.py Phase 2b - kubectl scale --replicas=0 (scale DOWN)
# Also annotates Component CRDs and suspends Postgres instances
vcfcomponents = salt:deployment/salt-master
    salt:deployment/salt-minion
    salt-raas:deployment/redis
    salt-raas:deployment/raas
    telemetry:deployment/telemetry-acceptor
    vcf-fleet-depot:deployment/depot-service
    vcf-fleet-depot:deployment/distribution-service
    vcf-fleet-lcm:deployment/vcf-fleet-build-service-fleetbuild
    vcf-fleet-lcm:deployment/vcf-fleet-upgrade-service-fleetupgrade
    vcf-sddc-lcm:deployment/vcf-sddc-build-service-sddcbuild
    vcf-sddc-lcm:deployment/vcf-sddc-upgrade-service-sddcupgrade
    vidb-external:deployment/vidb-service
    ops-logs:statefulset/log-processor
    ops-logs:statefulset/log-store
    vodap:deployment/vcf-obs-collector-controller-service
    vodap:deployment/vcf-obs-data-query-service
    vodap:deployment/vcf-obs-esx-collector-service
    vodap:deployment/vcf-obs-netops-collector-service
    vodap:deployment/vcf-obs-vc-collector-service
    vodap:statefulset/chi-vcf-obs-vcf-obs-0-0
    vodap:statefulset/chk-vcf-obs-keeper-keeper-0-0
    vodap:statefulset/chk-vcf-obs-keeper-keeper-0-1
    vodap:statefulset/chk-vcf-obs-keeper-keeper-0-2
    vmsp-metrics-store:deployment/clickhouse-operator-altinity-clickhouse-operator
    vmsp-metrics-store:deployment/vsp-metrics-store-operator
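# Illustrative only: each namespace:resource entry above maps to a kubectl scale command;
# a sketch of that mapping (running kubectl via subprocess is an assumption, the actual
# VCFfinal.py/VCFshutdown.py code may run it over SSH on the VSP control plane instead):
#   import subprocess
#   replicas = '1'  # startup scales up; shutdown would use '0'
#   for entry in cfg.get('VCFFINAL', 'vcfcomponents').splitlines():
#       namespace, resource = entry.strip().split(':', 1)
#       subprocess.run(['kubectl', '-n', namespace, 'scale', resource,
#                       f'--replicas={replicas}'], check=False)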
# VSP control plane IP (auto-discovered from VSP worker if not set)
# Used by: VCFfinal.py Task 2e (startup), VCFshutdown.py Phase 2b (shutdown)
#vspcontrolplaneip = 10.1.1.142
# Tanzu Supervisor Control Plane VMs to start
# Format: vm_regex:vcenter (vcenter optional)
# Startup: VCFfinal.py Task 1 - power on Supervisor VMs
# Shutdown: VCFshutdown.py Phase 3b (Supervisor workload drain),
# Phase 3 (stop WCP), Phase 4 (Supervisor VM shutdown + dynamic discovery)
tanzucontrol = SupervisorControlPlaneVM.*:vc-wld01-a.site-a.vcf.lab
# Tanzu Deployment scripts to run after control plane starts
# Format: host:account:script_path
# Startup: VCFfinal.py Task 3
#tanzudeploy = vc-wld01-a.site-a.vcf.lab:root:/usr/local/bin/tanzu-deploy.sh
# manager.site-a.vcf.lab:holuser:/home/holuser/scripts/deploy-tkg.sh
# VCF Automation VMs (started late in startup, shut down early)
# Format: vm_regex:vcenter
# Startup: VCFfinal.py Task 4 - power on VCF Automation VMs
# Shutdown: VCFshutdown.py Phase 1 (Fleet API power-off for 'vra'),
# Phase 1b (fallback: regex match these VMs if Fleet API unreachable)
# Tools: confighol-9.0.py, confighol-9.1.py
vravms = auto-platform-a.*:vc-mgmt-a.site-a.vcf.lab
# VCF Automation URLs to check after startup
# Format: url,expected_text
# Startup: VCFfinal.py Task 5
# Tools: vpodchecker.py
vraurls = https://auto-a.site-a.vcf.lab/login/,VCF Automation
    https://auto-a.site-a.vcf.lab/cci/kubernetes/apis/project.cci.vmware.com/v1alpha2/projects,Status
# VCF Component URLs to check after startup
# Format: url,expected_text
# Startup: VCFfinal.py Task 6
vcfcomponenturls = https://opsnet-a.site-a.vcf.lab,Operations for Networks
    https://opslogs-a.site-a.vcf.lab/en-US/,Log Management
    https://fleet-01a.site-a.vcf.lab/fleet-lcm/v1/components
#==============================================================================
# [SHUTDOWN] - Shutdown-specific configuration
# Read by: Shutdown/VCFshutdown.py, Shutdown/Shutdown.py, Shutdown/fleet.py
# NOTE: NSX, vCenters, and ESXi hosts are read from [VCF] section above.
# This section contains shutdown-only overrides and VM name mappings.
#==============================================================================
[SHUTDOWN]
# Fleet Operations - VCF 9.0 legacy (SDDC Manager LCM API via opslcm-a)
# Used when [VCF] vcf_version = 9.0 or when VCF 9.1 auto-probe fails
# Shutdown: VCFshutdown.py Phase 1 -> fleet.py shutdown_products()
# API: POST /lcm/lcops/api/v2/environments/{envId}/products/{productId}/power-off
# Auth: Basic (base64 encoded credentials)
fleet_fqdn = opslcm-a.site-a.vcf.lab
fleet_username = admin@local
# Products/components to shutdown via Fleet Operations
# VCF 9.0: product IDs (vra, vrni, vrops, vrli)
# VCF 9.1: mapped to component types (VCFA, VRNI, VROPS, VRLI)
# Shutdown: VCFshutdown.py Phase 1, fleet.py
fleet_products = vra,vrni,vrops,vrli
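# Illustrative only: the power-off API call described above, sketched with the third-party
# requests library (an assumption); env_id and password below are placeholders, not values
# the actual fleet.py implementation uses:
#   import requests
#   fqdn = cfg.get('SHUTDOWN', 'fleet_fqdn')
#   user = cfg.get('SHUTDOWN', 'fleet_username')
#   password = 'CHANGE_ME'   # placeholder; the real password source is not shown here
#   env_id = 'CHANGE_ME'     # placeholder environment id returned by the LCM API
#   for product_id in cfg.get('SHUTDOWN', 'fleet_products').split(','):
#       url = (f'https://{fqdn}/lcm/lcops/api/v2/environments/{env_id}'
#              f'/products/{product_id.strip()}/power-off')
#       requests.post(url, auth=(user, password), verify=False, timeout=60)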
# Docker containers to stop before VCF shutdown
# Shutdown: Shutdown.py Phase 1
shutdown_docker = true
docker_host = docker.site-a.vcf.lab
docker_user = holuser
docker_containers = gitlab,ldap,poste.io,flask
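# Illustrative only: one way the containers listed above could be stopped on the docker
# host (invoking ssh via subprocess is an assumption about Shutdown.py):
#   import subprocess
#   host = cfg.get('SHUTDOWN', 'docker_host')
#   user = cfg.get('SHUTDOWN', 'docker_user')
#   containers = cfg.get('SHUTDOWN', 'docker_containers').split(',')
#   subprocess.run(['ssh', f'{user}@{host}', 'docker', 'stop', *containers], check=False)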
# VM regex patterns to find and shutdown (Tanzu/K8s workloads)
# Shutdown: VCFshutdown.py Phase 4
vm_patterns = ^kubernetes-cluster-.*$
    ^dev-project-.*$
    ^cci-service-.*$
    ^SupervisorControlPlaneVM.*$
# Specific workload VMs to shutdown by exact name
# Shutdown: VCFshutdown.py Phase 4
workload_vms = core-a
    core-b
    hol-ubuntu-001
# VMs to forcefully delete (regex patterns supported)
# Shutdown: VCFshutdown.py Phase 4
#vms_to_delete = avi-se.*
# dummy-vm.*
# ESXi username for SSH operations (host settings, vSAN elevator)
# Shutdown: VCFshutdown.py Phases 18, 19, 20
esx_username = root
# vSAN settings
# Shutdown: VCFshutdown.py Phase 19 (elevator operations)
# NOTE: ESA environments are auto-detected and skip the 45-minute wait
vsan_enabled = true
# vSAN timeout in seconds (45 minutes = 2700 seconds)
vsan_timeout = 2700
# Host shutdown toggle
# Shutdown: VCFshutdown.py Phase 20
shutdown_hosts = true
#--------------------------------------------------------------------------
# VCF Operations VM overrides for this lab
# These map actual VM names to shutdown phases.
# If commented out, VCFshutdown.py uses built-in defaults.
#--------------------------------------------------------------------------
# VCF Operations for Networks VMs (vrni) - VCF 9.0 mgmt domain order #2
# Shutdown: VCFshutdown.py Phase 8
#vcf_ops_networks_vms = opsnet-a
# opsnet-01a
# opsnetcollector-01a
# VCF Operations Collector VMs - VCF 9.0 mgmt domain order #3
# Shutdown: VCFshutdown.py Phase 9
#vcf_ops_collector_vms = opscollector-01a
# opsproxy-01a
# VCF Operations for Logs VMs (vrli) - VCF 9.0 mgmt domain order #4
# Shutdown: VCFshutdown.py Phase 10
#vcf_ops_logs_vms = opslogs-01a
# ops-01a
# ops-a
# VCF Identity Broker VMs - VCF 9.0 mgmt domain order #5
# Shutdown: VCFshutdown.py Phase 11
#vcf_identity_broker_vms =
# VCF Operations Fleet Management VMs - VCF 9.0 mgmt domain order #6
# Shutdown: VCFshutdown.py Phase 12
#vcf_ops_fleet_vms = opslcm-01a
# opslcm-a
# VCF Operations VMs (vrops + orchestrator) - VCF 9.0 mgmt domain order #7
# Shutdown: VCFshutdown.py Phase 13
#vcf_ops_vms = o11n-02a
# o11n-01a
# SDDC Manager VMs - VCF 9.0 mgmt domain order #11
# Shutdown: VCFshutdown.py Phase 16
#sddc_manager_vms = sddcmanager-a
# NOTE: The following are read automatically from [VCF] section:
# - NSX Edges: [VCF] vcfnsxedges -> Shutdown Phases 5/14 (filtered by wld/mgmt)
# - NSX Managers: [VCF] vcfnsxmgr -> Shutdown Phases 6/15 (filtered by wld/mgmt)
# - ESXi Hosts: [VCF] vcfmgmtcluster -> Shutdown Phases 18/19/20
# - vCenters: [VCF] vcfvCenter -> Shutdown Phases 7/17 (filtered by wld/mgmt)