|
1
|
|
|
import sys |
|
2
|
|
|
|
|
3
|
|
|
from pyVmomi import vim |
|
4
|
|
|
|
|
5
|
|
|
from vmwarelib import inventory |
|
6
|
|
|
from vmwarelib.actions import BaseAction |
|
7
|
|
|
|
|
8
|
|
|
|
|
9
|
|
|
class NewHardDisk(BaseAction):
    """Attach a new (or existing) virtual hard disk to one or more VMs.

    Rough equivalent of the PowerCLI ``New-HardDisk`` cmdlet: builds the
    appropriate disk backing info, wraps it in a VM reconfigure spec, and
    either applies it directly or routes it through Storage DRS when a
    datastore cluster is supplied.
    """

    @staticmethod
    def get_backinginfo_for_existing_disk(disk_path):
        """Return backing info that points at an already-existing VMDK file.

        :param disk_path: datastore path of the existing disk file.
        :returns: vim.vm.device.VirtualDevice.FileBackingInfo
        """
        # NOTE(review): the generic VirtualDevice.FileBackingInfo is used here
        # rather than VirtualDisk.FlatVer2BackingInfo; presumably vCenter
        # accepts it when attaching an existing file -- confirm if attach fails.
        backing_info = vim.vm.device.VirtualDevice.FileBackingInfo()
        backing_info.fileName = disk_path
        return backing_info

    @staticmethod
    def get_flatfile_backinginfo(storage_format, persistence):
        """Return FlatVer2 backing info for a brand-new flat-file disk.

        :param storage_format: one of 'eagerzeroedthick', 'thin', 'thin2gb',
            'thick', 'thick2gb' (expected already lower-cased by run()).
            Unrecognized values leave provisioning flags unset.
        :param persistence: disk mode string (e.g. 'persistent').
        :returns: vim.vm.device.VirtualDisk.FlatVer2BackingInfo
        """
        backing_info = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        if storage_format == 'eagerzeroedthick':
            backing_info.thinProvisioned = False
            backing_info.eagerlyScrub = True
        elif storage_format == 'thin':
            backing_info.thinProvisioned = True
        elif storage_format == 'thin2gb':
            backing_info.thinProvisioned = True
            backing_info.split = True
        elif storage_format == 'thick':
            backing_info.thinProvisioned = False
        elif storage_format == 'thick2gb':
            backing_info.thinProvisioned = False
            backing_info.split = True
        backing_info.diskMode = persistence
        return backing_info

    @staticmethod
    def get_rawfile_backinginfo(device_name, persistence):
        """Return raw-device-mapping (RDM) backing info.

        :param device_name: host device name of the raw LUN.
        :param persistence: disk mode string (e.g. 'persistent').
        :returns: vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo
        """
        backing_info = vim.vm.device.VirtualDisk.RawDiskMappingVer1BackingInfo()
        backing_info.deviceName = device_name
        backing_info.diskMode = persistence
        return backing_info

    @staticmethod
    def get_next_unit_number(vm):
        """Return the next free SCSI unit number on the VM's controller.

        See https://github.com/whereismyjetpack/pyvmomi-community-samples/blob/add-disk/samples/add_disk_to_vm.py

        :param vm: vim.VirtualMachine whose hardware is inspected.
        :returns: int unit number (7 is skipped -- reserved for the
            SCSI controller itself).
        """
        unit_number = 0
        for dev in vm.config.hardware.device:
            # Track the highest-numbered existing virtual disk and go one past it.
            if isinstance(dev, vim.VirtualDisk):
                unit_number = int(dev.unitNumber) + 1
        # unit_number 7 reserved for scsi controller
        if unit_number == 7:
            unit_number += 1
        return unit_number

    @staticmethod
    def get_vm_reconfig_spec(vm, datastore, disk_type, storage_format, persistence,
                             disk_path, device_name, capacity_gb):
        """Build a vim.vm.ConfigSpec that adds exactly one hard disk to ``vm``.

        :param vm: target vim.VirtualMachine.
        :param datastore: vim.Datastore to place the disk on (may be None).
        :param disk_type: 'flat' or a 'raw*' type; ignored when disk_path given.
        :param storage_format: provisioning format for flat disks.
        :param persistence: disk mode string.
        :param disk_path: path of an existing VMDK to attach; falsy to create new.
        :param device_name: raw LUN device name (RDM only).
        :param capacity_gb: size of a newly created disk, in GB.
        :raises Exception: when neither disk_path nor a known disk_type is given.
        :returns: vim.vm.ConfigSpec
        """
        if disk_path:
            backing_info = NewHardDisk.get_backinginfo_for_existing_disk(disk_path)
        elif disk_type == 'flat':
            backing_info = NewHardDisk.get_flatfile_backinginfo(storage_format, persistence)
        elif disk_type.startswith('raw'):
            # BUGFIX: was an unqualified get_rawfile_backinginfo(...) call,
            # which raises NameError inside a @staticmethod.
            backing_info = NewHardDisk.get_rawfile_backinginfo(device_name, persistence)
        else:
            raise Exception("Wrong disk_type and empty disk_path. Either one should be present.")
        backing_info.datastore = datastore

        # creating Virtual Disk Device
        virtual_disk = vim.vm.device.VirtualDisk()
        virtual_disk.backing = backing_info
        # GB -> KB for a new disk; an existing disk keeps its own size (0).
        # BUGFIX: was ``disk_path == ''`` -- inconsistent with the truthiness
        # checks above and wrong for disk_path=None.
        virtual_disk.capacityInKB = (int(capacity_gb * 1024 * 1024) if not disk_path else 0)
        virtual_disk.controllerKey = 1000
        virtual_disk.unitNumber = NewHardDisk.get_next_unit_number(vm)

        # creating Virtual Device Spec
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        if not disk_path:
            # Only a brand-new disk needs its backing file created.
            disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = virtual_disk

        # creating reconfig spec
        vm_reconfig_spec = vim.vm.ConfigSpec()
        vm_reconfig_spec.deviceChange = [disk_spec]
        return vm_reconfig_spec

    @staticmethod
    def get_storage_placement_spec(ds_clust_obj, vm, vm_reconfig_spec):
        """Build the Storage DRS placement spec for a disk-add reconfigure.

        :param ds_clust_obj: vim.StoragePod (datastore cluster) to place into.
        :param vm: target vim.VirtualMachine.
        :param vm_reconfig_spec: ConfigSpec from get_vm_reconfig_spec(); its
            single deviceChange entry's backing is used as the disk locator.
        :returns: vim.storageDrs.StoragePlacementSpec
        """
        storage_placement_spec = vim.storageDrs.StoragePlacementSpec()
        storage_placement_spec.type = vim.storageDrs.StoragePlacementSpec.\
            PlacementType.reconfigure
        storage_placement_spec.configSpec = vm_reconfig_spec
        storage_placement_spec.podSelectionSpec = vim.storageDrs.PodSelectionSpec()
        storage_placement_spec.vm = vm

        vm_pod_cfg = vim.storageDrs.PodSelectionSpec.VmPodConfig()
        vm_pod_cfg.storagePod = ds_clust_obj
        disk_locator = vim.storageDrs.PodSelectionSpec.DiskLocator()
        disk_locator.diskBackingInfo = vm_reconfig_spec.deviceChange[0].device.backing
        vm_pod_cfg.disk = [disk_locator]
        storage_placement_spec.podSelectionSpec.initialVmConfig = [vm_pod_cfg]

        return storage_placement_spec

    def run(self, vms, persistence='Persistent', disk_type='flat', capacity_gb=1, datastore=None,
            datastore_cluster=None, device_name=None, disk_path='', storage_format='Thin'):
        """Add a hard disk to each VM in ``vms``.

        :param vms: list of VirtualMachine MOIDs.
        :param persistence: disk mode (case-insensitive).
        :param disk_type: 'flat' or a 'raw*' type.
        :param capacity_gb: size of a newly created disk, in GB.
        :param datastore: optional datastore MOID for placement.
        :param datastore_cluster: optional StoragePod MOID; when given,
            placement goes through Storage DRS recommendations.
        :param device_name: raw LUN device name (RDM only).
        :param disk_path: existing VMDK path to attach instead of creating.
        :param storage_format: provisioning format (case-insensitive).
        :returns: list of {"vm_moid": ..., "success": ...} dicts.
        """
        # TODO: 'controller' parameter is missing here. The reason is because we
        # do not support passing real objects like PowerCli and there is no uuid
        # to find and address the controller in the system.
        persistence = persistence.lower()
        disk_type = disk_type.lower()
        storage_format = storage_format.lower()

        si = self.si
        si_content = si.RetrieveContent()
        vm_objs = [vim.VirtualMachine(moid, stub=si._stub) for moid in vms]
        vm_names = [vm_obj.name for vm_obj in vm_objs]  # by checking the name property, the vms' existance is checked.
        datastore_obj = None
        if datastore:
            datastore_obj = vim.Datastore(datastore, stub=si._stub)
            datastore_obj.name  # by checking the name property, the vms' existance is checked.

        result = []

        if datastore_cluster:
            ds_clust_obj = vim.StoragePod(datastore_cluster, stub=si._stub)
            ds_clust_obj.name  # by retrieving the name property, the existance is checked.
            srm = si_content.storageResourceManager

            for vm in vm_objs:
                vm_reconfig_spec = NewHardDisk.get_vm_reconfig_spec(
                    vm, datastore_obj, disk_type, storage_format, persistence,
                    disk_path, device_name, capacity_gb)

                storage_placement_spec = NewHardDisk.get_storage_placement_spec(
                    ds_clust_obj, vm, vm_reconfig_spec)
                datastores = srm.RecommendDatastores(storageSpec=storage_placement_spec)
                if not datastores.recommendations:
                    # BUGFIX: was ``vm.obj._GetMoId()`` (VirtualMachine has no
                    # .obj) and fell through into recommendations[0] below,
                    # raising IndexError instead of skipping as the message says.
                    sys.stderr.write('Skipping the vm. There is no datastore recommendation for vm' + vm._GetMoId())
                    continue
                add_disk_task = srm.ApplyStorageDrsRecommendation_Task(
                    datastores.recommendations[0].key)
                successfully_added_disk = self._wait_for_task(add_disk_task)
                result.append({
                    "vm_moid": vm._GetMoId(),
                    "success": successfully_added_disk
                })
        else:
            for vm in vm_objs:
                vm_reconfig_spec = NewHardDisk.get_vm_reconfig_spec(
                    vm, datastore_obj, disk_type, storage_format, persistence,
                    disk_path, device_name, capacity_gb)
                add_disk_task = vm.ReconfigVM_Task(spec=vm_reconfig_spec)
                successfully_added_disk = self._wait_for_task(add_disk_task)
                result.append({
                    "vm_moid": vm._GetMoId(),
                    "success": successfully_added_disk
                })

        return result
|
155
|
|
|
|