Coverage for drivers/LVHDSR.py: 44%

1 #!/usr/bin/python3
2 #
3 # Copyright (C) Citrix Systems Inc.
4 #
5 # This program is free software; you can redistribute it and/or modify
6 # it under the terms of the GNU Lesser General Public License as published
7 # by the Free Software Foundation; version 2.1 only.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU Lesser General Public License for more details.
13 #
14 # You should have received a copy of the GNU Lesser General Public License
15 # along with this program; if not, write to the Free Software Foundation, Inc.,
16 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 #
18 # LVHDSR: VHD on LVM storage repository
19 #
21 import SR
22 from SR import deviceCheck
23 import VDI
24 import SRCommand
25 import util
26 import lvutil
27 import lvmcache
28 import vhdutil
29 import lvhdutil
30 import scsiutil
31 import os
32 import sys
33 import time
34 import errno
35 import xs_errors
36 import cleanup
37 import blktap2
38 from journaler import Journaler
39 from lock import Lock
40 from refcounter import RefCounter
41 from ipc import IPCFlag
42 from lvmanager import LVActivator
43 import XenAPI # pylint: disable=import-error
44 import re
45 from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
46 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
47 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
48 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
49 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
50 from metadata import retrieveXMLfromFile, _parseXML
51 from xmlrpc.client import DateTime
52 import glob
53 from constants import CBTLOG_TAG
54 from fairlock import Fairlock
55 DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
57 geneology = {}
58 CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
59 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
60 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
61 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
62 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
64 CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
66 DRIVER_INFO = {
67 'name': 'Local VHD on LVM',
68 'description': 'SR plugin which represents disks as VHD disks on ' + \
69 'Logical Volumes within a locally-attached Volume Group',
70 'vendor': 'XenSource Inc',
71 'copyright': '(C) 2008 XenSource Inc',
72 'driver_version': '1.0',
73 'required_api_version': '1.0',
74 'capabilities': CAPABILITIES,
75 'configuration': CONFIGURATION
76 }
78 PARAM_VHD = "vhd"
79 PARAM_RAW = "raw"
81 OPS_EXCLUSIVE = [
82 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
83 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
84 "vdi_clone"]
86 # Log if snapshot pauses VM for more than this many seconds
87 LONG_SNAPTIME = 60
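# Illustrative usage (assumed XE CLI syntax, not part of this file): the driver
# is configured with the single required 'device' key from CONFIGURATION above,
# e.g.
#   xe sr-create type=lvm name-label=local-lvm device-config:device=/dev/sda3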
89 class LVHDSR(SR.SR):
90 DRIVER_TYPE = 'lvhd'
92 PROVISIONING_TYPES = ["thin", "thick"]
93 PROVISIONING_DEFAULT = "thick"
94 THIN_PLUGIN = "lvhd-thin"
96 PLUGIN_ON_SLAVE = "on-slave"
98 FLAG_USE_VHD = "use_vhd"
99 MDVOLUME_NAME = "MGT"
101 ALLOCATION_QUANTUM = "allocation_quantum"
102 INITIAL_ALLOCATION = "initial_allocation"
104 LOCK_RETRY_INTERVAL = 3
105 LOCK_RETRY_ATTEMPTS = 10
107 TEST_MODE_KEY = "testmode"
108 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
109 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
110 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
111 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
112 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
113 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
114 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
116 ENV_VAR_VHD_TEST = {
117 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
118 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
119 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
120 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
121 TEST_MODE_VHD_FAIL_REPARENT_END:
122 "VHD_UTIL_TEST_FAIL_REPARENT_END",
123 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
124 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
125 TEST_MODE_VHD_FAIL_RESIZE_DATA:
126 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
127 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
128 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
129 TEST_MODE_VHD_FAIL_RESIZE_END:
130 "VHD_UTIL_TEST_FAIL_RESIZE_END"
131 }
132 testMode = ""
134 legacyMode = True
136 def handles(type):
137 """Returns True if this SR class understands the given dconf string"""
138 # we can pose as LVMSR or EXTSR for compatibility purposes
139 if __name__ == '__main__':
140 name = sys.argv[0]
141 else:
142 name = __name__
143 if name.endswith("LVMSR"):
144 return type == "lvm"
145 elif name.endswith("EXTSR"):
146 return type == "ext"
147 return type == LVHDSR.DRIVER_TYPE
148 handles = staticmethod(handles)
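# Note on handles() above: the same class is also exposed under the LVMSR and
# EXTSR driver names (hence the module/argv[0] check), so handles("lvhd") is
# true when running as LVHDSR, while "lvm"/"ext" are only claimed when the code
# runs under those compatibility names.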
150 def load(self, sr_uuid):
151 self.ops_exclusive = OPS_EXCLUSIVE
153 self.isMaster = False
154 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
155 self.isMaster = True
157 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
158 self.sr_vditype = SR.DEFAULT_TAP
159 self.uuid = sr_uuid
160 self.vgname = lvhdutil.VG_PREFIX + self.uuid
161 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
162 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
163 self.provision = self.PROVISIONING_DEFAULT
164 try:
165 self.lvmCache = lvmcache.LVMCache(self.vgname)
166 except:
167 raise xs_errors.XenError('SRUnavailable', \
168 opterr='Failed to initialise the LVMCache')
169 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
170 self.journaler = Journaler(self.lvmCache)
171 if not self.srcmd.params.get("sr_ref"):
172 return # must be a probe call
173 # Test for thick vs thin provisioning conf parameter
174 if 'allocation' in self.dconf: 174 ↛ 175 (condition never true)
175 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
176 self.provision = self.dconf['allocation']
177 else:
178 raise xs_errors.XenError('InvalidArg', \
179 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
181 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
182 if self.other_conf.get(self.TEST_MODE_KEY): 182 ↛ 186 (condition never false)
183 self.testMode = self.other_conf[self.TEST_MODE_KEY]
184 self._prepareTestMode()
186 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
187 # sm_config flag overrides PBD, if any
188 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
189 self.provision = self.sm_config.get('allocation')
191 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
192 self.legacyMode = False
194 if lvutil._checkVG(self.vgname):
195 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach", 195 ↛ 198 (condition never false)
196 "vdi_activate", "vdi_deactivate"]:
197 self._undoAllJournals()
198 if not self.cmd in ["sr_attach", "sr_probe"]:
199 self._checkMetadataVolume()
201 self.mdexists = False
203 # get a VDI -> TYPE map from the storage
204 contains_uuid_regex = \
205 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
206 self.storageVDIs = {}
208 for key in self.lvmCache.lvs.keys(): 208 ↛ 210 (loop never started)
209 # if the lvname has a uuid in it
210 type = None
211 if contains_uuid_regex.search(key) is not None:
212 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
213 type = vhdutil.VDI_TYPE_VHD
214 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
215 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
216 type = vhdutil.VDI_TYPE_RAW
217 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
218 else:
219 continue
221 if type is not None:
222 self.storageVDIs[vdi] = type
224 # check if metadata volume exists
225 try:
226 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
227 except:
228 pass
230 def cleanup(self):
231 # we don't need to hold the lock to dec refcounts of activated LVs
232 if not self.lvActivator.deactivateAll(): 232 ↛ 233 (condition never true)
233 raise util.SMException("failed to deactivate LVs")
235 def updateSRMetadata(self, allocation):
236 try:
237 # Add SR specific SR metadata
238 sr_info = \
239 {ALLOCATION_TAG: allocation,
240 UUID_TAG: self.uuid,
241 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
242 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
243 }
245 vdi_info = {}
246 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
247 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
249 # Create the VDI entry in the SR metadata
250 vdi_info[vdi_uuid] = \
251 {
252 UUID_TAG: vdi_uuid,
253 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
254 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
255 IS_A_SNAPSHOT_TAG: \
256 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
257 SNAPSHOT_OF_TAG: \
258 self.session.xenapi.VDI.get_snapshot_of(vdi),
259 SNAPSHOT_TIME_TAG: \
260 self.session.xenapi.VDI.get_snapshot_time(vdi),
261 TYPE_TAG: \
262 self.session.xenapi.VDI.get_type(vdi),
263 VDI_TYPE_TAG: \
264 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
265 READ_ONLY_TAG: \
266 int(self.session.xenapi.VDI.get_read_only(vdi)),
267 METADATA_OF_POOL_TAG: \
268 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
269 MANAGED_TAG: \
270 int(self.session.xenapi.VDI.get_managed(vdi))
271 }
272 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
274 except Exception as e:
275 raise xs_errors.XenError('MetadataError', \
276 opterr='Error upgrading SR Metadata: %s' % str(e))
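# Rough shape of the metadata written above (illustrative values only):
#   sr_info  = {ALLOCATION_TAG: "thick", UUID_TAG: "<sr-uuid>", ...}
#   vdi_info = {"<vdi-uuid>": {UUID_TAG: "<vdi-uuid>", VDI_TYPE_TAG: "vhd",
#                              IS_A_SNAPSHOT_TAG: 0, MANAGED_TAG: 1, ...}, ...}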
278 def syncMetadataAndStorage(self):
279 try:
280 # if a VDI is present in the metadata but not in the storage
281 # then delete it from the metadata
282 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
283 for vdi in list(vdi_info.keys()):
284 update_map = {}
285 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()): 285 ↛ 292 (condition never false)
286 # delete this from metadata
287 LVMMetadataHandler(self.mdpath). \
288 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
289 else:
290 # search for this in the metadata, compare types
291 # self.storageVDIs is a map of vdi_uuid to vdi_type
292 if vdi_info[vdi][VDI_TYPE_TAG] != \
293 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
294 # storage type takes authority
295 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
296 = METADATA_OBJECT_TYPE_VDI
297 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
298 update_map[VDI_TYPE_TAG] = \
299 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
300 LVMMetadataHandler(self.mdpath) \
301 .updateMetadata(update_map)
302 else:
303 # This should never happen
304 pass
306 except Exception as e:
307 raise xs_errors.XenError('MetadataError', \
308 opterr='Error synching SR Metadata and storage: %s' % str(e))
310 def syncMetadataAndXapi(self):
311 try:
312 # get metadata
313 (sr_info, vdi_info) = \
314 LVMMetadataHandler(self.mdpath, False).getMetadata()
316 # First synch SR parameters
317 self.update(self.uuid)
319 # Now update the VDI information in the metadata if required
320 for vdi_offset in vdi_info.keys():
321 try:
322 vdi_ref = \
323 self.session.xenapi.VDI.get_by_uuid( \
324 vdi_info[vdi_offset][UUID_TAG])
325 except:
326 # maybe the VDI is not in XAPI yet, don't bother
327 continue
329 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
330 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
332 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
333 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
334 new_name_description:
335 update_map = {}
336 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
337 METADATA_OBJECT_TYPE_VDI
338 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
339 update_map[NAME_LABEL_TAG] = new_name_label
340 update_map[NAME_DESCRIPTION_TAG] = new_name_description
341 LVMMetadataHandler(self.mdpath) \
342 .updateMetadata(update_map)
343 except Exception as e:
344 raise xs_errors.XenError('MetadataError', \
345 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
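# Direction of the three sync helpers above: the LVs on storage are
# authoritative for which VDIs exist and for their vdi_type, XAPI is
# authoritative for name labels and descriptions, and the MGT metadata volume
# is updated to match both.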
347 def _checkMetadataVolume(self):
348 util.SMlog("Entering _checkMetadataVolume")
349 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
350 if self.isMaster: 350 ↛ 366 (condition never false)
351 if self.mdexists and self.cmd == "sr_attach":
352 try:
353 # activate the management volume
354 # will be deactivated at detach time
355 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
356 self._synchSmConfigWithMetaData()
357 util.SMlog("Sync SR metadata and the state on the storage.")
358 self.syncMetadataAndStorage()
359 self.syncMetadataAndXapi()
360 except Exception as e:
361 util.SMlog("Exception in _checkMetadataVolume, " \
362 "Error: %s." % str(e))
363 elif not self.mdexists and not self.legacyMode: 363 ↛ 366 (condition never false)
364 self._introduceMetaDataVolume()
366 if self.mdexists:
367 self.legacyMode = False
369 def _synchSmConfigWithMetaData(self):
370 util.SMlog("Synching sm-config with metadata volume")
372 try:
373 # get SR info from metadata
374 sr_info = {}
375 map = {}
376 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
378 if sr_info == {}: 378 ↛ 379 (condition never true)
379 raise Exception("Failed to get SR information from metadata.")
381 if "allocation" in sr_info: 381 ↛ 385 (condition never false)
382 self.provision = sr_info.get("allocation")
383 map['allocation'] = sr_info.get("allocation")
384 else:
385 raise Exception("Allocation key not found in SR metadata. "
386 "SR info found: %s" % sr_info)
388 except Exception as e:
389 raise xs_errors.XenError(
390 'MetadataError',
391 opterr='Error reading SR params from '
392 'metadata Volume: %s' % str(e))
393 try:
394 map[self.FLAG_USE_VHD] = 'true'
395 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
396 except:
397 raise xs_errors.XenError(
398 'MetadataError',
399 opterr='Error updating sm_config key')
401 def _introduceMetaDataVolume(self):
402 util.SMlog("Creating Metadata volume")
403 try:
404 config = {}
405 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
407 # activate the management volume, will be deactivated at detach time
408 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
410 name_label = util.to_plain_string( \
411 self.session.xenapi.SR.get_name_label(self.sr_ref))
412 name_description = util.to_plain_string( \
413 self.session.xenapi.SR.get_name_description(self.sr_ref))
414 config[self.FLAG_USE_VHD] = "true"
415 config['allocation'] = self.provision
416 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
418 # Add the SR metadata
419 self.updateSRMetadata(self.provision)
420 except Exception as e:
421 raise xs_errors.XenError('MetadataError', \
422 opterr='Error introducing Metadata Volume: %s' % str(e))
424 def _removeMetadataVolume(self):
425 if self.mdexists:
426 try:
427 self.lvmCache.remove(self.MDVOLUME_NAME)
428 except:
429 raise xs_errors.XenError('MetadataError', \
430 opterr='Failed to delete MGT Volume')
432 def _refresh_size(self):
433 """
434 Refreshes the size of the backing device.
435 Returns True if all paths/devices agree on the same size.
436 """
437 if hasattr(self, 'SCSIid'): 437 ↛ 439 (condition never true)
438 # LVHDoHBASR, LVHDoISCSISR
439 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
440 else:
441 # LVHDSR
442 devices = self.dconf['device'].split(',')
443 scsiutil.refreshdev(devices)
444 return True
446 def _expand_size(self):
447 """
448 Expands the size of the SR by growing into additional available
449 space, if extra space is available on the backing device.
450 Needs to be called after a successful call of _refresh_size.
451 """
452 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
453 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
454 resizethreshold = 100 * 1024 * 1024 # 100MB
455 devices = self.dconf['device'].split(',')
456 totaldevicesize = 0
457 for device in devices:
458 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
459 if totaldevicesize >= (currentvgsize + resizethreshold):
460 try:
461 if hasattr(self, 'SCSIid'): 461 ↛ 463 (condition never true)
462 # LVHDoHBASR, LVHDoISCSISR might have slaves
463 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
464 getattr(self, 'SCSIid'))
465 util.SMlog("LVHDSR._expand_size for %s will resize the pv." %
466 self.uuid)
467 for pv in lvutil.get_pv_for_vg(self.vgname):
468 lvutil.resizePV(pv)
469 except:
470 util.logException("LVHDSR._expand_size for %s failed to resize"
471 " the PV" % self.uuid)
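# Worked example of the check above (sizes assumed for illustration): with a
# single 2 TiB device and a VG still reporting 2 TiB - 50 MiB of physical_size,
# the summed device size does not exceed the VG size by the 100 MiB threshold,
# so nothing is resized; grow the device by at least ~100 MiB beyond the VG
# size and lvutil.resizePV is called for each PV in the VG.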
473 @deviceCheck
474 def create(self, uuid, size):
475 util.SMlog("LVHDSR.create for %s" % self.uuid)
476 if not self.isMaster:
477 util.SMlog('sr_create blocked for non-master')
478 raise xs_errors.XenError('LVMMaster')
480 if lvutil._checkVG(self.vgname):
481 raise xs_errors.XenError('SRExists')
483 # Check that none of the devices is already in use by another PBD
484 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
485 raise xs_errors.XenError('SRInUse')
487 # Check serial number entry in SR records
488 for dev in self.dconf['device'].split(','):
489 if util.test_scsiserial(self.session, dev):
490 raise xs_errors.XenError('SRInUse')
492 lvutil.createVG(self.dconf['device'], self.vgname)
494 # Update serial number string
495 scsiutil.add_serial_record(self.session, self.sr_ref, \
496 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
498 # since this is an SR.create turn off legacy mode
499 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
500 self.FLAG_USE_VHD, 'true')
502 def delete(self, uuid):
503 util.SMlog("LVHDSR.delete for %s" % self.uuid)
504 if not self.isMaster:
505 raise xs_errors.XenError('LVMMaster')
506 cleanup.gc_force(self.session, self.uuid)
508 success = True
509 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
510 if util.extractSRFromDevMapper(fileName) != self.uuid:
511 continue
513 if util.doesFileHaveOpenHandles(fileName):
514 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
515 "handles" % fileName)
516 success = False
517 continue
519 # Now attempt to remove the dev mapper entry
520 if not lvutil.removeDevMapperEntry(fileName, False):
521 success = False
522 continue
524 try:
525 lvname = os.path.basename(fileName.replace('-', '/'). \
526 replace('//', '-'))
527 lpath = os.path.join(self.path, lvname)
528 os.unlink(lpath)
529 except OSError as e:
530 if e.errno != errno.ENOENT:
531 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
532 "file %s. Error: %s" % (fileName, str(e)))
533 success = False
535 if success:
536 try:
537 if util.pathexists(self.path):
538 os.rmdir(self.path)
539 except Exception as e:
540 util.SMlog("LVHDSR.delete: failed to remove the symlink " \
541 "directory %s. Error: %s" % (self.path, str(e)))
542 success = False
544 self._removeMetadataVolume()
545 self.lvmCache.refresh()
546 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
547 raise xs_errors.XenError('SRNotEmpty')
549 if not success:
550 raise Exception("LVHDSR delete failed, please refer to the log " \
551 "for details.")
553 lvutil.removeVG(self.dconf['device'], self.vgname)
554 self._cleanup()
556 def attach(self, uuid):
557 util.SMlog("LVHDSR.attach for %s" % self.uuid)
559 self._cleanup(True) # in case of host crashes, if detach wasn't called
561 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname): 561 ↛ 562 (condition never true)
562 raise xs_errors.XenError('SRUnavailable', \
563 opterr='no such volume group: %s' % self.vgname)
565 # Refresh the metadata status
566 self._checkMetadataVolume()
568 refreshsizeok = self._refresh_size()
570 if self.isMaster: 570 ↛ 581 (condition never false)
571 if refreshsizeok: 571 ↛ 575 (condition never false)
572 self._expand_size()
574 # Update SCSIid string
575 util.SMlog("Calling devlist_to_serial")
576 scsiutil.add_serial_record(
577 self.session, self.sr_ref,
578 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
580 # Test Legacy Mode Flag and update if VHD volumes exist
581 if self.isMaster and self.legacyMode: 581 ↛ 582 (condition never true)
582 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
583 for uuid, info in vdiInfo.items():
584 if info.vdiType == vhdutil.VDI_TYPE_VHD:
585 self.legacyMode = False
586 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
587 self._introduceMetaDataVolume()
588 break
590 # Set the block scheduler
591 for dev in self.dconf['device'].split(','):
592 self.block_setscheduler(dev)
594 def detach(self, uuid):
595 util.SMlog("LVHDSR.detach for %s" % self.uuid)
596 cleanup.abort(self.uuid)
598 # Do a best effort cleanup of the dev mapper entries
599 # go through all devmapper entries for this VG
600 success = True
601 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
602 if util.extractSRFromDevMapper(fileName) != self.uuid: 602 ↛ 603 (condition never true)
603 continue
605 with Fairlock('devicemapper'):
606 # check if any file has open handles
607 if util.doesFileHaveOpenHandles(fileName):
608 # if yes, log this and signal failure
609 util.SMlog(
610 f"LVHDSR.detach: The dev mapper entry {fileName} has "
611 "open handles")
612 success = False
613 continue
615 # Now attempt to remove the dev mapper entry
616 if not lvutil.removeDevMapperEntry(fileName, False): 616 ↛ 617 (condition never true)
617 success = False
618 continue
620 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
621 try:
622 lvname = os.path.basename(fileName.replace('-', '/'). \
623 replace('//', '-'))
624 lvname = os.path.join(self.path, lvname)
625 util.force_unlink(lvname)
626 except Exception as e:
627 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
628 "file %s. Error: %s" % (fileName, str(e)))
629 success = False
631 # now remove the directory where the symlinks are
632 # this should pass as the directory should be empty by now
633 if success:
634 try:
635 if util.pathexists(self.path): 635 ↛ 636 (condition never true)
636 os.rmdir(self.path)
637 except Exception as e:
638 util.SMlog("LVHDSR.detach: failed to remove the symlink " \
639 "directory %s. Error: %s" % (self.path, str(e)))
640 success = False
642 if not success:
643 raise Exception("SR detach failed, please refer to the log " \
644 "for details.")
646 # Don't delete lock files on the master as it will break the locking
647 # between SM and any GC thread that survives through SR.detach.
648 # However, we should still delete lock files on slaves as it is the
649 # only place to do so.
650 self._cleanup(self.isMaster)
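# Note on the dev-mapper name decoding used in delete() and detach() above:
# device-mapper escapes '-' inside VG/LV names as '--' and joins VG and LV with
# a single '-', so a mapping such as
#   VG_XenStorage--<sr-uuid>-VHD--<vdi-uuid>
# is turned back into the LV name VHD-<vdi-uuid> by the
# replace('-', '/').replace('//', '-') and basename() steps.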
652 def forget_vdi(self, uuid):
653 if not self.legacyMode:
654 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
655 super(LVHDSR, self).forget_vdi(uuid)
657 def scan(self, uuid):
658 activated = True
659 try:
660 lvname = ''
661 util.SMlog("LVHDSR.scan for %s" % self.uuid)
662 if not self.isMaster: 662 ↛ 663 (condition never true)
663 util.SMlog('sr_scan blocked for non-master')
664 raise xs_errors.XenError('LVMMaster')
666 if self._refresh_size(): 666 ↛ 668 (condition never false)
667 self._expand_size()
668 self.lvmCache.refresh()
669 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
670 self._loadvdis()
671 stats = lvutil._getVGstats(self.vgname)
672 self.physical_size = stats['physical_size']
673 self.physical_utilisation = stats['physical_utilisation']
675 # Now check if there are any VDIs in the metadata, which are not in
676 # XAPI
677 if self.mdexists: 677 ↛ 787 (condition never false)
678 vdiToSnaps = {}
679 # get VDIs from XAPI
680 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
681 vdi_uuids = set([])
682 for vdi in vdis:
683 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
685 Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
687 for vdi in list(Dict.keys()):
688 vdi_uuid = Dict[vdi][UUID_TAG]
689 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])): 689 ↛ 690 (condition never true)
690 if Dict[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
691 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
692 else:
693 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
695 if vdi_uuid not in vdi_uuids: 695 ↛ 696 (condition never true)
696 util.SMlog("Introduce VDI %s as it is present in " \
697 "metadata and not in XAPI." % vdi_uuid)
698 sm_config = {}
699 sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG]
700 lvname = "%s%s" % \
701 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
702 self.lvmCache.activateNoRefcount(lvname)
703 activated = True
704 lvPath = os.path.join(self.path, lvname)
706 if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
707 size = self.lvmCache.getSize( \
708 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
709 vdi_uuid)
710 utilisation = \
711 util.roundup(lvutil.LVM_SIZE_INCREMENT,
712 int(size))
713 else:
714 parent = \
715 vhdutil._getVHDParentNoCheck(lvPath)
717 if parent is not None:
718 sm_config['vhd-parent'] = parent[len( \
719 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
720 size = vhdutil.getSizeVirt(lvPath)
721 if self.provision == "thin":
722 utilisation = \
723 util.roundup(lvutil.LVM_SIZE_INCREMENT,
724 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
725 else:
726 utilisation = lvhdutil.calcSizeVHDLV(int(size))
728 vdi_ref = self.session.xenapi.VDI.db_introduce(
729 vdi_uuid,
730 Dict[vdi][NAME_LABEL_TAG],
731 Dict[vdi][NAME_DESCRIPTION_TAG],
732 self.sr_ref,
733 Dict[vdi][TYPE_TAG],
734 False,
735 bool(int(Dict[vdi][READ_ONLY_TAG])),
736 {},
737 vdi_uuid,
738 {},
739 sm_config)
741 self.session.xenapi.VDI.set_managed(vdi_ref,
742 bool(int(Dict[vdi][MANAGED_TAG])))
743 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
744 str(size))
745 self.session.xenapi.VDI.set_physical_utilisation( \
746 vdi_ref, str(utilisation))
747 self.session.xenapi.VDI.set_is_a_snapshot( \
748 vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])))
749 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):
750 self.session.xenapi.VDI.set_snapshot_time( \
751 vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG]))
752 if Dict[vdi][TYPE_TAG] == 'metadata':
753 self.session.xenapi.VDI.set_metadata_of_pool( \
754 vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG])
756 # Update CBT status of disks either just added
757 # or already in XAPI
758 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
759 if cbt_logname in cbt_vdis: 759 ↛ 760 (condition never true)
760 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
761 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
762 # For existing VDIs, update local state too
763 # Scan in base class SR updates existing VDIs
764 # again based on local states
765 if vdi_uuid in self.vdis:
766 self.vdis[vdi_uuid].cbt_enabled = True
767 cbt_vdis.remove(cbt_logname)
769 # Now set the snapshot statuses correctly in XAPI
770 for srcvdi in vdiToSnaps.keys(): 770 ↛ 771 (loop never started)
771 try:
772 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
773 except:
774 # the source VDI no longer exists, continue
775 continue
777 for snapvdi in vdiToSnaps[srcvdi]:
778 try:
779 # this might fail in cases where it's already set
780 snapref = \
781 self.session.xenapi.VDI.get_by_uuid(snapvdi)
782 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
783 except Exception as e:
784 util.SMlog("Setting snapshot failed. " \
785 "Error: %s" % str(e))
787 if cbt_vdis: 787 ↛ 798 (condition never false)
788 # If we have items remaining in this list,
789 # they are cbt_metadata VDIs that XAPI doesn't know about
790 # Add them to self.vdis and they'll get added to the DB
791 for cbt_vdi in cbt_vdis: 791 ↛ 792 (loop never started)
792 cbt_uuid = cbt_vdi.split(".")[0]
793 new_vdi = self.vdi(cbt_uuid)
794 new_vdi.ty = "cbt_metadata"
795 new_vdi.cbt_enabled = True
796 self.vdis[cbt_uuid] = new_vdi
798 super(LVHDSR, self).scan(uuid)
799 self._kickGC()
801 finally:
802 if lvname != '' and activated: 802 ↛ 803 (condition never true)
803 self.lvmCache.deactivateNoRefcount(lvname)
805 def update(self, uuid):
806 if not lvutil._checkVG(self.vgname): 806 ↛ 807 (condition never true)
807 return
808 self._updateStats(uuid, 0)
810 if self.legacyMode: 810 ↛ 811 (condition never true)
811 return
813 # synch name_label in metadata with XAPI
814 update_map = {}
815 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
816 METADATA_OBJECT_TYPE_SR,
817 NAME_LABEL_TAG: util.to_plain_string( \
818 self.session.xenapi.SR.get_name_label(self.sr_ref)),
819 NAME_DESCRIPTION_TAG: util.to_plain_string( \
820 self.session.xenapi.SR.get_name_description(self.sr_ref))
821 }
822 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
824 def _updateStats(self, uuid, virtAllocDelta):
825 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
826 self.virtual_allocation = valloc + virtAllocDelta
827 util.SMlog("Setting virtual_allocation of SR %s to %d" %
828 (uuid, self.virtual_allocation))
829 stats = lvutil._getVGstats(self.vgname)
830 self.physical_size = stats['physical_size']
831 self.physical_utilisation = stats['physical_utilisation']
832 self._db_update()
834 @deviceCheck
835 def probe(self):
836 return lvutil.srlist_toxml(
837 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']),
838 lvhdutil.VG_PREFIX,
839 ('metadata' in self.srcmd.params['sr_sm_config'] and \
840 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
842 def vdi(self, uuid):
843 return LVHDVDI(self, uuid)
845 def _loadvdis(self):
846 self.virtual_allocation = 0
847 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
848 self.allVDIs = {}
850 for uuid, info in self.vdiInfo.items():
851 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX): 851 ↛ 852 (condition never true)
852 continue
853 if info.scanError: 853 ↛ 854 (condition never true)
854 raise xs_errors.XenError('VDIUnavailable', \
855 opterr='Error scanning VDI %s' % uuid)
856 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
857 if not self.vdis[uuid].hidden: 857 ↛ 850 (condition never false)
858 self.virtual_allocation += self.vdis[uuid].utilisation
860 for uuid, vdi in self.vdis.items():
861 if vdi.parent: 861 ↛ 862 (condition never true)
862 if vdi.parent in self.vdis:
863 self.vdis[vdi.parent].read_only = True
864 if vdi.parent in geneology:
865 geneology[vdi.parent].append(uuid)
866 else:
867 geneology[vdi.parent] = [uuid]
869 # Now remove all hidden leaf nodes to avoid introducing records that
870 # will be GC'ed
871 for uuid in list(self.vdis.keys()):
872 if uuid not in geneology and self.vdis[uuid].hidden: 872 ↛ 873 (condition never true)
873 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
874 del self.vdis[uuid]
876 def _ensureSpaceAvailable(self, amount_needed):
877 space_available = lvutil._getVGstats(self.vgname)['freespace']
878 if (space_available < amount_needed):
879 util.SMlog("Not enough space! free space: %d, need: %d" % \
880 (space_available, amount_needed))
881 raise xs_errors.XenError('SRNoSpace')
883 def _handleInterruptedCloneOps(self):
884 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
885 for uuid, val in entries.items(): 885 ↛ 886 (loop never started)
886 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
887 self._handleInterruptedCloneOp(uuid, val)
888 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
889 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
891 def _handleInterruptedCoalesceLeaf(self):
892 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
893 if len(entries) > 0: 893 ↛ 894 (condition never true)
894 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
895 cleanup.gc_force(self.session, self.uuid)
896 self.lvmCache.refresh()
898 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
899 """Either roll back or finalize the interrupted snapshot/clone
900 operation. Rolling back is unsafe if the leaf VHDs have already been
901 in use and written to. However, it is always safe to roll back while
902 we're still in the context of the failed snapshot operation since the
903 VBD is paused for the duration of the operation"""
904 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
905 lvs = lvhdutil.getLVInfo(self.lvmCache)
906 baseUuid, clonUuid = jval.split("_")
908 # is there a "base copy" VDI?
909 if not lvs.get(baseUuid):
910 # no base copy: make sure the original is there
911 if lvs.get(origUuid):
912 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
913 return
914 raise util.SMException("base copy %s not present, " \
915 "but no original %s found" % (baseUuid, origUuid))
917 if forceUndo:
918 util.SMlog("Explicit revert")
919 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
920 return
922 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
923 util.SMlog("One or both leaves missing => revert")
924 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
925 return
927 vdis = lvhdutil.getVDIInfo(self.lvmCache)
928 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
929 util.SMlog("One or both leaves invalid => revert")
930 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
931 return
933 orig = vdis[origUuid]
934 base = vdis[baseUuid]
935 self.lvActivator.activate(baseUuid, base.lvName, False)
936 self.lvActivator.activate(origUuid, orig.lvName, False)
937 if orig.parentUuid != baseUuid:
938 parent = vdis[orig.parentUuid]
939 self.lvActivator.activate(parent.uuid, parent.lvName, False)
940 origPath = os.path.join(self.path, orig.lvName)
941 if not vhdutil.check(origPath):
942 util.SMlog("Orig VHD invalid => revert")
943 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
944 return
946 if clonUuid:
947 clon = vdis[clonUuid]
948 clonPath = os.path.join(self.path, clon.lvName)
949 self.lvActivator.activate(clonUuid, clon.lvName, False)
950 if not vhdutil.check(clonPath):
951 util.SMlog("Clon VHD invalid => revert")
952 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
953 return
955 util.SMlog("Snapshot appears valid, will not roll back")
956 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
958 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
959 base = lvs[baseUuid]
960 basePath = os.path.join(self.path, base.name)
962 # make the parent RW
963 if base.readonly:
964 self.lvmCache.setReadonly(base.name, False)
966 ns = lvhdutil.NS_PREFIX_LVM + self.uuid
967 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
968 origRefcountNormal = 0
970 # un-hide the parent
971 if base.vdiType == vhdutil.VDI_TYPE_VHD:
972 self.lvActivator.activate(baseUuid, base.name, False)
973 origRefcountNormal = 1
974 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
975 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden:
976 vhdutil.setHidden(basePath, False)
977 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
978 self.lvmCache.setHidden(base.name, False)
980 # remove the child nodes
981 if clonUuid and lvs.get(clonUuid):
982 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
983 raise util.SMException("clone %s not VHD" % clonUuid)
984 self.lvmCache.remove(lvs[clonUuid].name)
985 if self.lvActivator.get(clonUuid, False):
986 self.lvActivator.remove(clonUuid, False)
987 if lvs.get(origUuid):
988 self.lvmCache.remove(lvs[origUuid].name)
990 # inflate the parent to fully-allocated size
991 if base.vdiType == vhdutil.VDI_TYPE_VHD:
992 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
993 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
995 # rename back
996 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
997 self.lvmCache.rename(base.name, origLV)
998 RefCounter.reset(baseUuid, ns)
999 if self.lvActivator.get(baseUuid, False):
1000 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1001 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1003 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1004 # flag to facilitate vm deactivate
1005 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1006 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1008 # update LVM metadata on slaves
1009 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1010 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
1011 origLV, origUuid, slaves)
1013 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1015 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
1016 """Finalize the interrupted snapshot/clone operation. This must not be
1017 called from the live snapshot op context because we attempt to pause/
1018 unpause the VBD here (the VBD is already paused during snapshot, so it
1019 would cause a deadlock)"""
1020 base = vdis[baseUuid]
1021 clon = None
1022 if clonUuid:
1023 clon = vdis[clonUuid]
1025 cleanup.abort(self.uuid)
1027 # make sure the parent is hidden and read-only
1028 if not base.hidden:
1029 if base.vdiType == vhdutil.VDI_TYPE_RAW:
1030 self.lvmCache.setHidden(base.lvName)
1031 else:
1032 basePath = os.path.join(self.path, base.lvName)
1033 vhdutil.setHidden(basePath)
1034 if not base.lvReadonly:
1035 self.lvmCache.setReadonly(base.lvName, True)
1037 # NB: since this snapshot-preserving call is only invoked outside the
1038 # snapshot op context, we assume the LVM metadata on the involved slave
1039 # has by now been refreshed and do not attempt to do it here
1041 # Update the original record
1042 try:
1043 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1044 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1045 type = self.session.xenapi.VDI.get_type(vdi_ref)
1046 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1047 sm_config['vhd-parent'] = baseUuid
1048 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1049 except XenAPI.Failure:
1050 util.SMlog("ERROR updating the orig record")
1052 # introduce the new VDI records
1053 if clonUuid:
1054 try:
1055 clon_vdi = VDI.VDI(self, clonUuid)
1056 clon_vdi.read_only = False
1057 clon_vdi.location = clonUuid
1058 clon_vdi.utilisation = clon.sizeLV
1059 clon_vdi.sm_config = {
1060 "vdi_type": vhdutil.VDI_TYPE_VHD,
1061 "vhd-parent": baseUuid}
1063 if not self.legacyMode:
1064 LVMMetadataHandler(self.mdpath). \
1065 ensureSpaceIsAvailableForVdis(1)
1067 clon_vdi_ref = clon_vdi._db_introduce()
1068 util.SMlog("introduced clon VDI: %s (%s)" % \
1069 (clon_vdi_ref, clonUuid))
1071 vdi_info = {UUID_TAG: clonUuid,
1072 NAME_LABEL_TAG: clon_vdi.label,
1073 NAME_DESCRIPTION_TAG: clon_vdi.description,
1074 IS_A_SNAPSHOT_TAG: 0,
1075 SNAPSHOT_OF_TAG: '',
1076 SNAPSHOT_TIME_TAG: '',
1077 TYPE_TAG: type,
1078 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1079 READ_ONLY_TAG: int(clon_vdi.read_only),
1080 MANAGED_TAG: int(clon_vdi.managed),
1081 METADATA_OF_POOL_TAG: ''
1082 }
1084 if not self.legacyMode:
1085 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1087 except XenAPI.Failure:
1088 util.SMlog("ERROR introducing the clon record")
1090 try:
1091 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1092 base_vdi.label = "base copy"
1093 base_vdi.read_only = True
1094 base_vdi.location = baseUuid
1095 base_vdi.size = base.sizeVirt
1096 base_vdi.utilisation = base.sizeLV
1097 base_vdi.managed = False
1098 base_vdi.sm_config = {
1099 "vdi_type": vhdutil.VDI_TYPE_VHD,
1100 "vhd-parent": baseUuid}
1102 if not self.legacyMode:
1103 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1105 base_vdi_ref = base_vdi._db_introduce()
1106 util.SMlog("introduced base VDI: %s (%s)" % \
1107 (base_vdi_ref, baseUuid))
1109 vdi_info = {UUID_TAG: baseUuid,
1110 NAME_LABEL_TAG: base_vdi.label,
1111 NAME_DESCRIPTION_TAG: base_vdi.description,
1112 IS_A_SNAPSHOT_TAG: 0,
1113 SNAPSHOT_OF_TAG: '',
1114 SNAPSHOT_TIME_TAG: '',
1115 TYPE_TAG: type,
1116 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1117 READ_ONLY_TAG: int(base_vdi.read_only),
1118 MANAGED_TAG: int(base_vdi.managed),
1119 METADATA_OF_POOL_TAG: ''
1120 }
1122 if not self.legacyMode:
1123 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1124 except XenAPI.Failure:
1125 util.SMlog("ERROR introducing the base record")
1127 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1129 def _undoAllJournals(self):
1130 """Undo all VHD & SM interrupted journaled operations. This call must
1131 be serialized with respect to all operations that create journals"""
1132 # undoing interrupted inflates must be done first, since undoing VHD
1133 # ops might require inflations
1134 self.lock.acquire()
1135 try:
1136 self._undoAllInflateJournals()
1137 self._undoAllVHDJournals()
1138 self._handleInterruptedCloneOps()
1139 self._handleInterruptedCoalesceLeaf()
1140 finally:
1141 self.lock.release()
1142 self.cleanup()
1144 def _undoAllInflateJournals(self):
1145 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
1146 if len(entries) == 0:
1147 return
1148 self._loadvdis()
1149 for uuid, val in entries.items():
1150 vdi = self.vdis.get(uuid)
1151 if vdi: 1151 ↛ 1166 (condition never false)
1152 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1153 (uuid, vdi.path, val))
1154 if vdi.readonly: 1154 ↛ 1155 (condition never true)
1155 self.lvmCache.setReadonly(vdi.lvname, False)
1156 self.lvActivator.activate(uuid, vdi.lvname, False)
1157 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1158 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
1159 vhdutil.VHD_FOOTER_SIZE)
1160 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
1161 if vdi.readonly: 1161 ↛ 1162 (condition never true)
1162 self.lvmCache.setReadonly(vdi.lvname, True)
1163 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref): 1163 ↛ 1164 (condition never true)
1164 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1165 self.vgname, vdi.lvname, uuid)
1166 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
1167 delattr(self, "vdiInfo")
1168 delattr(self, "allVDIs")
1170 def _undoAllVHDJournals(self):
1171 """check if there are VHD journals in existence and revert them"""
1172 journals = lvhdutil.getAllVHDJournals(self.lvmCache)
1173 if len(journals) == 0: 1173 ↛ 1175 (condition never false)
1174 return
1175 self._loadvdis()
1176 for uuid, jlvName in journals:
1177 vdi = self.vdis[uuid]
1178 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
1179 self.lvActivator.activate(uuid, vdi.lvname, False)
1180 self.lvmCache.activateNoRefcount(jlvName)
1181 fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
1182 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
1183 try:
1184 jFile = os.path.join(self.path, jlvName)
1185 vhdutil.revert(vdi.path, jFile)
1186 except util.CommandException:
1187 util.logException("VHD journal revert")
1188 vhdutil.check(vdi.path)
1189 util.SMlog("VHD revert failed but VHD ok: removing journal")
1190 # Attempt to reclaim unused space
1191 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
1192 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1193 if NewSize < fullSize:
1194 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1195 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1196 self.vgname, vdi.lvname, uuid)
1197 self.lvmCache.remove(jlvName)
1198 delattr(self, "vdiInfo")
1199 delattr(self, "allVDIs")
1201 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1202 masterRef = util.get_this_host_ref(self.session)
1203 args = {"vgName": self.vgname,
1204 "action1": "deactivateNoRefcount",
1205 "lvName1": origOldLV}
1206 for hostRef in hostRefs:
1207 if hostRef == masterRef: 1207 ↛ 1208 (condition never true)
1208 continue
1209 util.SMlog("Deactivate VDI on %s" % hostRef)
1210 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1211 util.SMlog("call-plugin returned: %s" % rv)
1212 if not rv: 1212 ↛ 1213 (condition never true)
1213 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1215 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1216 baseUuid, baseLV):
1217 """We need to reactivate the original LV on each slave (note that the
1218 name for the original LV might change), as well as init the refcount
1219 for the base LV"""
1220 args = {"vgName": self.vgname,
1221 "action1": "refresh",
1222 "lvName1": origLV,
1223 "action2": "activate",
1224 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid,
1225 "lvName2": baseLV,
1226 "uuid2": baseUuid}
1228 masterRef = util.get_this_host_ref(self.session)
1229 for hostRef in hostRefs:
1230 if hostRef == masterRef: 1230 ↛ 1231 (condition never true)
1231 continue
1232 util.SMlog("Updating %s, %s, %s on slave %s" % \
1233 (origOldLV, origLV, baseLV, hostRef))
1234 rv = self.session.xenapi.host.call_plugin(
1235 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1236 util.SMlog("call-plugin returned: %s" % rv)
1237 if not rv: 1237 ↛ 1238 (condition never true)
1238 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1240 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1241 """Reactivate and refresh CBT log file on slaves"""
1242 args = {"vgName": self.vgname,
1243 "action1": "deactivateNoRefcount",
1244 "lvName1": cbtlog,
1245 "action2": "refresh",
1246 "lvName2": cbtlog}
1248 masterRef = util.get_this_host_ref(self.session)
1249 for hostRef in hostRefs:
1250 if hostRef == masterRef: 1250 ↛ 1251 (condition never true)
1251 continue
1252 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef))
1253 rv = self.session.xenapi.host.call_plugin(
1254 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1255 util.SMlog("call-plugin returned: %s" % rv)
1256 if not rv: 1256 ↛ 1257 (condition never true)
1257 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1259 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1260 """Tell the slave we deleted the base image"""
1261 args = {"vgName": self.vgname,
1262 "action1": "cleanupLockAndRefcount",
1263 "uuid1": baseUuid,
1264 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}
1266 masterRef = util.get_this_host_ref(self.session)
1267 for hostRef in hostRefs:
1268 if hostRef == masterRef: 1268 ↛ 1269 (condition never true)
1269 continue
1270 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef))
1271 rv = self.session.xenapi.host.call_plugin(
1272 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1273 util.SMlog("call-plugin returned: %s" % rv)
1274 if not rv: 1274 ↛ 1275 (condition never true)
1275 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
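# The three _updateSlaves* helpers above share one calling convention: build a
# map of numbered "actionN"/"lvNameN"/"uuidN"/"nsN" keys and invoke the
# on-slave plugin's "multi" function on every host in hostRefs except the
# master, treating a falsy return value as failure.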
1277 def _cleanup(self, skipLockCleanup=False):
1278 """delete stale refcounter, flag, and lock files"""
1279 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1280 IPCFlag(self.uuid).clearAll()
1281 if not skipLockCleanup: 1281 ↛ 1282 (condition never true)
1282 Lock.cleanupAll(self.uuid)
1283 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1285 def _prepareTestMode(self):
1286 util.SMlog("Test mode: %s" % self.testMode)
1287 if self.ENV_VAR_VHD_TEST.get(self.testMode): 1287 ↛ 1288 (condition never true)
1288 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1289 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
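# Illustrative flow for the test hooks above: setting the SR other-config key
# "testmode" to, say, "vhd_fail_resize_begin" makes load() call
# _prepareTestMode(), which exports the mapped variable so vhd-util fault
# injection can trigger:
#   os.environ["VHD_UTIL_TEST_FAIL_RESIZE_BEGIN"] = "yes"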
1291 def _kickGC(self):
1292 # don't bother if an instance is already running (this is just an
1293 # optimization to reduce the overhead of forking a new process if we
1294 # don't have to, but the process will check the lock anyway)
1295 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
1296 if not lockRunning.acquireNoblock(): 1296 ↛ 1297 (condition never true)
1297 if cleanup.should_preempt(self.session, self.uuid):
1298 util.SMlog("Aborting currently-running coalesce of garbage VDI")
1299 try:
1300 if not cleanup.abort(self.uuid, soft=True):
1301 util.SMlog("The GC has already been scheduled to "
1302 "re-start")
1303 except util.CommandException as e:
1304 if e.code != errno.ETIMEDOUT:
1305 raise
1306 util.SMlog('failed to abort the GC')
1307 else:
1308 util.SMlog("A GC instance already running, not kicking")
1309 return
1310 else:
1311 lockRunning.release()
1313 util.SMlog("Kicking GC")
1314 cleanup.gc(self.session, self.uuid, True)
1316 def ensureCBTSpace(self):
1317 # Ensure we have space for at least one LV
1318 self._ensureSpaceAvailable(self.journaler.LV_SIZE)
1321 class LVHDVDI(VDI.VDI):
1323 JRN_CLONE = "clone" # journal entry type for the clone operation
1325 def load(self, vdi_uuid):
1326 self.lock = self.sr.lock
1327 self.lvActivator = self.sr.lvActivator
1328 self.loaded = False
1329 self.vdi_type = vhdutil.VDI_TYPE_VHD
1330 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"): 1330 ↛ 1332 (condition never false)
1331 self.vdi_type = vhdutil.VDI_TYPE_RAW
1332 self.uuid = vdi_uuid
1333 self.location = self.uuid
1334 self.exists = True
1336 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1337 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1338 if self.parent: 1338 ↛ 1339 (condition never true)
1339 self.sm_config_override['vhd-parent'] = self.parent
1340 else:
1341 self.sm_config_override['vhd-parent'] = None
1342 return
1344 # scan() didn't run: determine the type of the VDI manually
1345 if self._determineType():
1346 return
1348 # the VDI must be in the process of being created
1349 self.exists = False
1350 if "vdi_sm_config" in self.sr.srcmd.params and \ 1350 ↛ 1352 (condition never true)
1351 "type" in self.sr.srcmd.params["vdi_sm_config"]:
1352 type = self.sr.srcmd.params["vdi_sm_config"]["type"]
1353 if type == PARAM_RAW:
1354 self.vdi_type = vhdutil.VDI_TYPE_RAW
1355 elif type == PARAM_VHD:
1356 self.vdi_type = vhdutil.VDI_TYPE_VHD
1357 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
1358 raise xs_errors.XenError('VDICreate', \
1359 opterr='Cannot create VHD type disk in legacy mode')
1360 else:
1361 raise xs_errors.XenError('VDICreate', opterr='bad type')
1362 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
1363 self.path = os.path.join(self.sr.path, self.lvname)
1365 def create(self, sr_uuid, vdi_uuid, size):
1366 util.SMlog("LVHDVDI.create for %s" % self.uuid)
1367 if not self.sr.isMaster:
1368 raise xs_errors.XenError('LVMMaster')
1369 if self.exists:
1370 raise xs_errors.XenError('VDIExists')
1372 size = vhdutil.validate_and_round_vhd_size(int(size))
1374 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \
1375 (self.vdi_type, self.path, size))
1376 lvSize = 0
1377 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1378 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1379 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1380 else:
1381 if self.sr.provision == "thin":
1382 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1383 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1384 elif self.sr.provision == "thick":
1385 lvSize = lvhdutil.calcSizeVHDLV(int(size))
1387 self.sr._ensureSpaceAvailable(lvSize)
1389 try:
1390 self.sr.lvmCache.create(self.lvname, lvSize)
1391 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1392 self.size = self.sr.lvmCache.getSize(self.lvname)
1393 else:
1394 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB)
1395 self.size = vhdutil.getSizeVirt(self.path)
1396 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1397 except util.CommandException as e:
1398 util.SMlog("Unable to create VDI")
1399 self.sr.lvmCache.remove(self.lvname)
1400 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1402 self.utilisation = lvSize
1403 self.sm_config["vdi_type"] = self.vdi_type
1405 if not self.sr.legacyMode:
1406 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1408 self.ref = self._db_introduce()
1409 self.sr._updateStats(self.sr.uuid, self.size)
1411 vdi_info = {UUID_TAG: self.uuid,
1412 NAME_LABEL_TAG: util.to_plain_string(self.label),
1413 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1414 IS_A_SNAPSHOT_TAG: 0,
1415 SNAPSHOT_OF_TAG: '',
1416 SNAPSHOT_TIME_TAG: '',
1417 TYPE_TAG: self.ty,
1418 VDI_TYPE_TAG: self.vdi_type,
1419 READ_ONLY_TAG: int(self.read_only),
1420 MANAGED_TAG: int(self.managed),
1421 METADATA_OF_POOL_TAG: ''
1422 }
1424 if not self.sr.legacyMode:
1425 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1427 return VDI.VDI.get_params(self)
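# Sizing recap for create() above: RAW VDIs get an LV of the requested size
# rounded up to the LVM increment; VHD VDIs get either a minimal LV sized for
# an empty VHD ("thin" provisioning) or a fully inflated LV sized by
# lvhdutil.calcSizeVHDLV ("thick").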
1429 def delete(self, sr_uuid, vdi_uuid, data_only=False):
1430 util.SMlog("LVHDVDI.delete for %s" % self.uuid)
1431 try:
1432 self._loadThis()
1433 except SR.SRException as e:
1434 # Catch 'VDI doesn't exist' exception
1435 if e.errno == 46:
1436 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1437 raise
1439 vdi_ref = self.sr.srcmd.params['vdi_ref']
1440 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1441 raise xs_errors.XenError("VDIDelete", \
1442 opterr="Deleting non-leaf node not permitted")
1444 if not self.hidden:
1445 self._markHidden()
1447 if not data_only:
1448 # Remove from XAPI and delete from MGT
1449 self._db_forget()
1450 else:
1451 # If this is a data_destroy call, don't remove from XAPI db
1452 # Only delete from MGT
1453 if not self.sr.legacyMode:
1454 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1456 # deactivate here because it might be too late to do it in the "final"
1457 # step: GC might have removed the LV by then
1458 if self.sr.lvActivator.get(self.uuid, False):
1459 self.sr.lvActivator.deactivate(self.uuid, False)
1461 try:
1462 self.sr.lvmCache.remove(self.lvname)
1463 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid)
1464 self.sr.lock.cleanupAll(vdi_uuid)
1465 except SR.SRException as e:
1466 util.SMlog(
1467 "Failed to remove the volume (maybe is leaf coalescing) "
1468 "for %s err:%d" % (self.uuid, e.errno))
1470 self.sr._updateStats(self.sr.uuid, -self.size)
1471 self.sr._kickGC()
1472 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1474 def attach(self, sr_uuid, vdi_uuid):
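# Attach refuses VDIs with pending journal entries (an interrupted operation
# must first be repaired by an SR scan). For a writable attach of a VHD that
# is not yet fully provisioned, the LV is inflated to its full size via
# _prepareThin(True) before the actual attach in _attach().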
1475 util.SMlog("LVHDVDI.attach for %s" % self.uuid)
1476 if self.sr.journaler.hasJournals(self.uuid):
1477 raise xs_errors.XenError('VDIUnavailable',
1478 opterr='Interrupted operation detected on this VDI, '
1479 'scan SR first to trigger auto-repair')
1481 writable = ('args' not in self.sr.srcmd.params) or \
1482 (self.sr.srcmd.params['args'][0] == "true")
1483 needInflate = True
1484 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
1485 needInflate = False
1486 else:
1487 self._loadThis()
1488 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
1489 needInflate = False
1491 if needInflate:
1492 try:
1493 self._prepareThin(True)
1494 except:
1495 util.logException("attach")
1496 raise xs_errors.XenError('LVMProvisionAttach')
1498 try:
1499 return self._attach()
1500 finally:
1501 if not self.sr.lvActivator.deactivateAll():
1502 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1504 def detach(self, sr_uuid, vdi_uuid):
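# Counterpart of attach: thin-provisioned VHDs (and snapshots, which are kept
# deflated even on thick SRs) are deflated back to their minimal size via
# _prepareThin(False) before the LVs are deactivated in _detach().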
1505 util.SMlog("LVHDVDI.detach for %s" % self.uuid)
1506 self._loadThis()
1507 already_deflated = (self.utilisation < \
1508 lvhdutil.calcSizeVHDLV(self.size))
1509 needDeflate = True
1510 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated:
1511 needDeflate = False
1512 elif self.sr.provision == "thick":
1513 needDeflate = False
1514 # except for snapshots, which are always deflated
1515 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1516 vdi_ref = self.sr.srcmd.params['vdi_ref']
1517 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1518 if snap:
1519 needDeflate = True
1521 if needDeflate:
1522 try:
1523 self._prepareThin(False)
1524 except:
1525 util.logException("_prepareThin")
1526 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1528 try:
1529 self._detach()
1530 finally:
1531 if not self.sr.lvActivator.deactivateAll():
1532 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1534 # We only support offline resize
1535 def resize(self, sr_uuid, vdi_uuid, size):
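# Grow-only, master-only resize. Raw VDIs simply get a larger LV; VHD VDIs are
# inflated to the new fully-provisioned LV size (unless thin-provisioned, in
# which case the LV stays deflated) and then the virtual size inside the VHD
# is raised with vhdutil.setSizeVirtFast.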
1536 util.SMlog("LVHDVDI.resize for %s" % self.uuid)
1537 if not self.sr.isMaster:
1538 raise xs_errors.XenError('LVMMaster')
1540 self._loadThis()
1541 if self.hidden:
1542 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1544 if size < self.size:
1545 util.SMlog('vdi_resize: shrinking not supported: ' + \
1546 '(current size: %d, new size: %d)' % (self.size, size))
1547 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1549 size = vhdutil.validate_and_round_vhd_size(int(size))
1551 if size == self.size:
1552 return VDI.VDI.get_params(self)
1554 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1555 lvSizeOld = self.size
1556 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1557 else:
1558 lvSizeOld = self.utilisation
1559 lvSizeNew = lvhdutil.calcSizeVHDLV(size)
1560 if self.sr.provision == "thin":
1561 # VDI is currently deflated, so keep it deflated
1562 lvSizeNew = lvSizeOld
1563 assert(lvSizeNew >= lvSizeOld)
1564 spaceNeeded = lvSizeNew - lvSizeOld
1565 self.sr._ensureSpaceAvailable(spaceNeeded)
1567 oldSize = self.size
1568 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1569 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1570 self.size = self.sr.lvmCache.getSize(self.lvname)
1571 self.utilisation = self.size
1572 else:
1573 if lvSizeNew != lvSizeOld:
1574 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid,
1575 lvSizeNew)
1576 vhdutil.setSizeVirtFast(self.path, size)
1577 self.size = vhdutil.getSizeVirt(self.path)
1578 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1580 vdi_ref = self.sr.srcmd.params['vdi_ref']
1581 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1582 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1583 str(self.utilisation))
1584 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1585 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1586 return VDI.VDI.get_params(self)
1588 def clone(self, sr_uuid, vdi_uuid):
1589 return self._do_snapshot(
1590 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1592 def compose(self, sr_uuid, vdi1, vdi2):
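# Compose sets vdi1 as the VHD parent of this VDI, hides the parent VHD and
# marks the VDI referenced in the call arguments as unmanaged, then forces a
# tap-refresh so any active tapdisk re-reads the chain.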
1593 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1))
1594 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1595 raise xs_errors.XenError('Unimplemented')
1597 parent_uuid = vdi1
1598 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid
1599 assert(self.sr.lvmCache.checkLV(parent_lvname))
1600 parent_path = os.path.join(self.sr.path, parent_lvname)
1602 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1603 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1605 vhdutil.setParent(self.path, parent_path, False)
1606 vhdutil.setHidden(parent_path)
1607 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1609 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1610 True):
1611 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1613 util.SMlog("Compose done")
1615 def reset_leaf(self, sr_uuid, vdi_uuid):
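# reset_leaf discards the contents of the leaf VHD (vhdutil.killData) while
# keeping its parent chain; it refuses to run on a VHD without a parent so a
# standalone disk cannot lose its data this way.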
1616 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid)
1617 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1618 raise xs_errors.XenError('Unimplemented')
1620 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1622 # safety check
1623 if not vhdutil.hasParent(self.path):
1624 raise util.SMException(("ERROR: VDI %s has no parent, "
1625 "will not reset contents") % self.uuid)
1627 vhdutil.killData(self.path)
1629 def _attach(self):
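# Activate the whole VHD chain (with a persistent binary refcount on the
# leaf), publish SCSI identification data plus the storage and vdi type via
# xenstore_data, persist the LV activation record and hand over to the
# generic VDI.VDI.attach.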
1630 self._chainSetActive(True, True, True)
1631 if not util.pathexists(self.path):
1632 raise xs_errors.XenError('VDIUnavailable', \
1633 opterr='Could not find: %s' % self.path)
1635 if not hasattr(self, 'xenstore_data'):
1636 self.xenstore_data = {}
1638 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1639 scsiutil.gen_synthetic_page_data(self.uuid)))
1641 self.xenstore_data['storage-type'] = 'lvm'
1642 self.xenstore_data['vdi-type'] = self.vdi_type
1644 self.attached = True
1645 self.sr.lvActivator.persist()
1646 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1648 def _detach(self):
1649 self._chainSetActive(False, True)
1650 self.attached = False
1652 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1653 cloneOp=False, secondary=None, cbtlog=None):
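# Wrapper around _snapshot that pauses the tapdisk for the duration of the
# snapshot, always unpauses it again (even on failure), and logs a warning if
# the VM was paused for longer than LONG_SNAPTIME seconds.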
1654 # If cbt enabled, save file consistency state
1655 if cbtlog is not None:
1656 if blktap2.VDI.tap_status(self.session, vdi_uuid):
1657 consistency_state = False
1658 else:
1659 consistency_state = True
1660 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1661 (consistency_state, vdi_uuid))
1662 else:
1663 consistency_state = None
1665 pause_time = time.time()
1666 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
1667 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1669 snapResult = None
1670 try:
1671 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state)
1672 except Exception as e1:
1673 try:
1674 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1675 secondary=None)
1676 except Exception as e2:
1677 util.SMlog('WARNING: failed to clean up failed snapshot: '
1678 '%s (error ignored)' % e2)
1679 raise
1680 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1681 unpause_time = time.time()
1682 if (unpause_time - pause_time) > LONG_SNAPTIME:
1683 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1684 (unpause_time - pause_time))
1685 return snapResult
1687 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None):
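# Core clone/snapshot algorithm: the current LV is renamed and becomes the
# hidden, read-only "base copy"; it is deflated to its physical minimum, and
# one or two new VHD leaves are snapshotted off it (one reusing the original
# VDI uuid and, for SNAPSHOT_DOUBLE, a second one for the clone/snapshot).
# A clone journal protects the whole sequence so that an interrupted
# operation can be repaired later.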
1688 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1690 if not self.sr.isMaster:
1691 raise xs_errors.XenError('LVMMaster')
1692 if self.sr.legacyMode:
1693 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1695 self._loadThis()
1696 if self.hidden:
1697 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1699 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1700 self.sr.srcmd.params['vdi_ref'])
1701 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1701 ↛ 1702line 1701 didn't jump to line 1702, because the condition on line 1701 was never true
1702 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1703 raise xs_errors.XenError('Unimplemented', \
1704 opterr='Raw VDI, snapshot or clone not permitted')
1706 # we must activate the entire VHD chain because the real parent could
1707 # theoretically be anywhere in the chain if all VHDs under it are empty
1708 self._chainSetActive(True, False)
1709 if not util.pathexists(self.path):
1710 raise xs_errors.XenError('VDIUnavailable', \
1711 opterr='VDI unavailable: %s' % (self.path))
1713 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
1714 depth = vhdutil.getDepth(self.path)
1715 if depth == -1:
1716 raise xs_errors.XenError('VDIUnavailable', \
1717 opterr='failed to get VHD depth')
1718 elif depth >= vhdutil.MAX_CHAIN_SIZE:
1719 raise xs_errors.XenError('SnapshotChainTooLong')
1721 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1722 self.sr.srcmd.params['vdi_ref'])
1724 fullpr = lvhdutil.calcSizeVHDLV(self.size)
1725 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \
1726 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1727 lvSizeOrig = thinpr
1728 lvSizeClon = thinpr
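# Size the two prospective new leaves: both default to the thin size (the
# overhead of an empty VHD); the checks below switch a leaf to the fully
# provisioned size when it is attached on some host or when the SR uses thick
# provisioning, and drop the clone leaf entirely for single/internal
# snapshots.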
1730 hostRefs = []
1731 if self.sr.cmd == "vdi_snapshot":
1732 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1733 if hostRefs:
1734 lvSizeOrig = fullpr
1735 if self.sr.provision == "thick":
1736 if not self.issnap:
1737 lvSizeOrig = fullpr
1738 if self.sr.cmd != "vdi_snapshot":
1739 lvSizeClon = fullpr
1741 if (snapType == VDI.SNAPSHOT_SINGLE or
1742 snapType == VDI.SNAPSHOT_INTERNAL):
1743 lvSizeClon = 0
1745 # the space required must include 2 journal LVs: a clone journal and an
1746 # inflate journal (for failure handling)
1747 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1748 lvSizeBase = self.size
1749 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
1750 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1751 vhdutil.getSizePhys(self.path))
1752 size_req -= (self.utilisation - lvSizeBase)
1753 self.sr._ensureSpaceAvailable(size_req)
1755 if hostRefs:
1756 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1758 baseUuid = util.gen_uuid()
1759 origUuid = self.uuid
1760 clonUuid = ""
1761 if snapType == VDI.SNAPSHOT_DOUBLE:
1762 clonUuid = util.gen_uuid()
1763 jval = "%s_%s" % (baseUuid, clonUuid)
1764 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1765 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1767 try:
1768 # self becomes the "base vdi"
1769 origOldLV = self.lvname
1770 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid
1771 self.sr.lvmCache.rename(self.lvname, baseLV)
1772 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1773 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1774 self.uuid = baseUuid
1775 self.lvname = baseLV
1776 self.path = os.path.join(self.sr.path, baseLV)
1777 self.label = "base copy"
1778 self.read_only = True
1779 self.location = self.uuid
1780 self.managed = False
1782 # shrink the base copy to the minimum - we do it before creating
1783 # the snapshot volumes to avoid requiring double the space
1784 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
1785 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1786 self.utilisation = lvSizeBase
1787 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1789 snapVDI = self._createSnap(origUuid, lvSizeOrig, False)
1790 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1791 snapVDI2 = None
1792 if snapType == VDI.SNAPSHOT_DOUBLE:
1793 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True)
1794 # If we have CBT enabled on the VDI,
1795 # set CBT status for the new snapshot disk
1796 if cbtlog:
1797 snapVDI2.cbt_enabled = True
1798 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1800 # note: it is important to mark the parent hidden only AFTER the
1801 # new VHD children have been created, which are referencing it;
1802 # otherwise we would introduce a race with GC that could reclaim
1803 # the parent before we snapshot it
1804 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1805 self.sr.lvmCache.setHidden(self.lvname)
1806 else:
1807 vhdutil.setHidden(self.path)
1808 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1810 # set the base copy to ReadOnly
1811 self.sr.lvmCache.setReadonly(self.lvname, True)
1812 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1814 if hostRefs:
1815 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1816 snapVDI.lvname, self.uuid, self.lvname)
1818 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1819 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1820 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1821 if hostRefs:
1822 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1823 try:
1824 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1825 except:
1826 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1827 alert_str = ("Creating CBT snapshot for {} failed"
1828 .format(snapVDI.uuid))
1829 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1830 pass
1832 except (util.SMException, XenAPI.Failure) as e:
1833 util.logException("LVHDVDI._snapshot")
1834 self._failClone(origUuid, jval, str(e))
1835 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1837 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1839 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1841 def _createSnap(self, snapUuid, snapSizeLV, isNew):
1842 """Snapshot self and return the snapshot VDI object"""
1843 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid
1844 snapPath = os.path.join(self.sr.path, snapLV)
1845 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1846 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1847 if isNew:
1848 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1849 self.sr.lvActivator.add(snapUuid, snapLV, False)
1850 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
1851 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB)
1852 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
1854 snapVDI = LVHDVDI(self.sr, snapUuid)
1855 snapVDI.read_only = False
1856 snapVDI.location = snapUuid
1857 snapVDI.size = self.size
1858 snapVDI.utilisation = snapSizeLV
1859 snapVDI.sm_config = dict()
1860 for key, val in self.sm_config.items():
1861 if key not in [
1862 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1863 not key.startswith("host_"):
1864 snapVDI.sm_config[key] = val
1865 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1866 snapVDI.sm_config["vhd-parent"] = snapParent
1867 snapVDI.lvname = snapLV
1868 return snapVDI
1870 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
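# Finalise the snapshot: introduce the new clone/snapshot VDI (and, if the
# base copy was kept, the base VDI) into XAPI and the on-LV MGT metadata,
# fix up refcounts and sm-config on the leaves, update the original VDI
# record, and return the parameters of the VDI the caller should see.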
1871 if snapType is not VDI.SNAPSHOT_INTERNAL:
1872 self.sr._updateStats(self.sr.uuid, self.size)
1873 basePresent = True
1875 # Verify parent locator field of both children and delete basePath if
1876 # unused
1877 snapParent = snapVDI.sm_config["vhd-parent"]
1878 snap2Parent = ""
1879 if snapVDI2:
1880 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1881 if snapParent != self.uuid and \
1882 (not snapVDI2 or snap2Parent != self.uuid):
1883 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1884 (snapParent, self.uuid, snap2Parent, self.lvname))
1885 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1886 self.sr.lvmCache.remove(self.lvname)
1887 self.sr.lvActivator.remove(self.uuid, False)
1888 if hostRefs:
1889 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1890 basePresent = False
1891 else:
1892 # assign the _binary_ refcount of the original VDI to the new base
1893 # VDI (but as the normal refcount, since binary refcounts are only
1894 # for leaf nodes). The normal refcount of the child is not
1896 # transferred to the base VDI because normal refcounts are
1896 # incremented and decremented individually, and not based on the
1897 # VHD chain (i.e., the child's normal refcount will be decremented
1898 # independently of its parent situation). Add 1 for this clone op.
1899 # Note that we do not need to protect the refcount operations
1900 # below with per-VDI locking like we do in lvutil because at this
1901 # point we have exclusive access to the VDIs involved. Other SM
1902 # operations are serialized by the Agent or with the SR lock, and
1903 # any coalesce activations are serialized with the SR lock. (The
1904 # coalesce activates the coalesced VDI pair in the beginning, which
1905 # cannot affect the VDIs here because they cannot possibly be
1906 # involved in coalescing at this point, and at the relinkSkip step
1907 # that activates the children, which takes the SR lock.)
1908 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
1909 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1910 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
1912 # the "paused" and "host_*" sm-config keys are special and must stay on
1913 # the leaf without being inherited by anyone else
1914 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]:
1915 snapVDI.sm_config[key] = self.sm_config[key]
1916 del self.sm_config[key]
1918 # Introduce any new VDI records & update the existing one
1919 type = self.session.xenapi.VDI.get_type( \
1920 self.sr.srcmd.params['vdi_ref'])
1921 if snapVDI2:
1922 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1923 vdiRef = snapVDI2._db_introduce()
1924 if cloneOp:
1925 vdi_info = {UUID_TAG: snapVDI2.uuid,
1926 NAME_LABEL_TAG: util.to_plain_string( \
1927 self.session.xenapi.VDI.get_name_label( \
1928 self.sr.srcmd.params['vdi_ref'])),
1929 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1930 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1931 IS_A_SNAPSHOT_TAG: 0,
1932 SNAPSHOT_OF_TAG: '',
1933 SNAPSHOT_TIME_TAG: '',
1934 TYPE_TAG: type,
1935 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1936 READ_ONLY_TAG: 0,
1937 MANAGED_TAG: int(snapVDI2.managed),
1938 METADATA_OF_POOL_TAG: ''
1939 }
1940 else:
1941 util.SMlog("snapshot VDI params: %s" % \
1942 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1943 vdi_info = {UUID_TAG: snapVDI2.uuid,
1944 NAME_LABEL_TAG: util.to_plain_string( \
1945 self.session.xenapi.VDI.get_name_label( \
1946 self.sr.srcmd.params['vdi_ref'])),
1947 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1948 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1949 IS_A_SNAPSHOT_TAG: 1,
1950 SNAPSHOT_OF_TAG: snapVDI.uuid,
1951 SNAPSHOT_TIME_TAG: '',
1952 TYPE_TAG: type,
1953 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1954 READ_ONLY_TAG: 0,
1955 MANAGED_TAG: int(snapVDI2.managed),
1956 METADATA_OF_POOL_TAG: ''
1957 }
1959 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1960 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1961 (vdiRef, snapVDI2.uuid))
1963 if basePresent:
1964 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1965 vdiRef = self._db_introduce()
1966 vdi_info = {UUID_TAG: self.uuid,
1967 NAME_LABEL_TAG: self.label,
1968 NAME_DESCRIPTION_TAG: self.description,
1969 IS_A_SNAPSHOT_TAG: 0,
1970 SNAPSHOT_OF_TAG: '',
1971 SNAPSHOT_TIME_TAG: '',
1972 TYPE_TAG: type,
1973 VDI_TYPE_TAG: self.sm_config['vdi_type'],
1974 READ_ONLY_TAG: 1,
1975 MANAGED_TAG: 0,
1976 METADATA_OF_POOL_TAG: ''
1977 }
1979 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1980 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
1981 (vdiRef, self.uuid))
1983 # Update the original record
1984 vdi_ref = self.sr.srcmd.params['vdi_ref']
1985 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
1986 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
1987 str(snapVDI.utilisation))
1989 # Return the info on the new snap VDI
1990 snap = snapVDI2
1991 if not snap:
1992 snap = self
1993 if not basePresent:
1994 # a single-snapshot of an empty VDI will be a noop, resulting
1995 # in no new VDIs, so return the existing one. The GC wouldn't
1996 # normally try to single-snapshot an empty VHD of course, but
1997 # if an external snapshot operation manages to sneak in right
1998 # before a snapshot-coalesce phase, we would get here
1999 snap = snapVDI
2000 return snap.get_params()
2002 def _initFromVDIInfo(self, vdiInfo):
2003 self.vdi_type = vdiInfo.vdiType
2004 self.lvname = vdiInfo.lvName
2005 self.size = vdiInfo.sizeVirt
2006 self.utilisation = vdiInfo.sizeLV
2007 self.hidden = vdiInfo.hidden
2008 if self.hidden:
2009 self.managed = False
2010 self.active = vdiInfo.lvActive
2011 self.readonly = vdiInfo.lvReadonly
2012 self.parent = vdiInfo.parentUuid
2013 self.path = os.path.join(self.sr.path, self.lvname)
2014 if hasattr(self, "sm_config_override"):
2015 self.sm_config_override["vdi_type"] = self.vdi_type
2016 else:
2017 self.sm_config_override = {'vdi_type': self.vdi_type}
2018 self.loaded = True
2020 def _initFromLVInfo(self, lvInfo):
2021 self.vdi_type = lvInfo.vdiType
2022 self.lvname = lvInfo.name
2023 self.size = lvInfo.size
2024 self.utilisation = lvInfo.size
2025 self.hidden = lvInfo.hidden
2026 self.active = lvInfo.active
2027 self.readonly = lvInfo.readonly
2028 self.parent = ''
2029 self.path = os.path.join(self.sr.path, self.lvname)
2030 if hasattr(self, "sm_config_override"):
2031 self.sm_config_override["vdi_type"] = self.vdi_type
2032 else:
2033 self.sm_config_override = {'vdi_type': self.vdi_type}
2034 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2035 self.loaded = True
2037 def _initFromVHDInfo(self, vhdInfo):
2038 self.size = vhdInfo.sizeVirt
2039 self.parent = vhdInfo.parentUuid
2040 self.hidden = vhdInfo.hidden
2041 self.loaded = True
2043 def _determineType(self):
2044 """Determine whether this is a raw or a VHD VDI"""
2045 if "vdi_ref" in self.sr.srcmd.params: 2045 ↛ 2058line 2045 didn't jump to line 2058, because the condition on line 2045 was never false
2046 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2047 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2048 if sm_config.get("vdi_type"):
2049 self.vdi_type = sm_config["vdi_type"]
2050 prefix = lvhdutil.LV_PREFIX[self.vdi_type]
2051 self.lvname = "%s%s" % (prefix, self.uuid)
2052 self.path = os.path.join(self.sr.path, self.lvname)
2053 self.sm_config_override = sm_config
2054 return True
2056 # LVM commands can be costly, so check the file directly first in case
2057 # the LV is active
2058 found = False
2059 for t in lvhdutil.VDI_TYPES:
2060 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
2061 path = os.path.join(self.sr.path, lvname)
2062 if util.pathexists(path):
2063 if found:
2064 raise xs_errors.XenError('VDILoad',
2065 opterr="multiple VDI's: uuid %s" % self.uuid)
2066 found = True
2067 self.vdi_type = t
2068 self.lvname = lvname
2069 self.path = path
2070 if found:
2071 return True
2073 # now list all LVs
2074 if not lvutil._checkVG(self.sr.vgname):
2075 # when doing attach_from_config, the VG won't be there yet
2076 return False
2078 lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
2079 if lvs.get(self.uuid):
2080 self._initFromLVInfo(lvs[self.uuid])
2081 return True
2082 return False
2084 def _loadThis(self):
2085 """Load VDI info for this VDI and activate the LV if it's VHD. We
2086 don't do it in VDI.load() because not all VDI operations need it."""
2087 if self.loaded:
2088 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2089 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2090 return
2091 try:
2092 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname)
2093 except util.CommandException as e:
2094 raise xs_errors.XenError('VDIUnavailable',
2095 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2096 if not lvs.get(self.uuid):
2097 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2098 self._initFromLVInfo(lvs[self.uuid])
2099 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2100 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2101 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False)
2102 if not vhdInfo:
2103 raise xs_errors.XenError('VDIUnavailable', \
2104 opterr='getVHDInfo failed')
2105 self._initFromVHDInfo(vhdInfo)
2106 self.loaded = True
2108 def _chainSetActive(self, active, binary, persistent=False):
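# Activate (or queue for later deactivation) every LV in this VDI's VHD
# parent chain; the binary refcount only ever applies to the leaf itself,
# never to its ancestors.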
2109 if binary:
2110 (count, bcount) = RefCounter.checkLocked(self.uuid,
2111 lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
2112 if (active and bcount > 0) or (not active and bcount == 0):
2113 return # this is a redundant activation/deactivation call
2115 vdiList = {self.uuid: self.lvname}
2116 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2117 vdiList = vhdutil.getParentChain(self.lvname,
2118 lvhdutil.extractUuid, self.sr.vgname)
2119 for uuid, lvName in vdiList.items():
2120 binaryParam = binary
2121 if uuid != self.uuid:
2122 binaryParam = False # binary param only applies to leaf nodes
2123 if active:
2124 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2125 persistent)
2126 else:
2127 # just add the LVs for deactivation in the final (cleanup)
2128 # step. The LVs must not have been activated during the current
2129 # operation
2130 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2132 def _failClone(self, uuid, jval, msg):
2133 try:
2134 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2135 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2136 except Exception as e:
2137 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2138 ' %s (error ignored)' % e)
2139 raise xs_errors.XenError('VDIClone', opterr=msg)
2141 def _markHidden(self):
2142 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2143 self.sr.lvmCache.setHidden(self.lvname)
2144 else:
2145 vhdutil.setHidden(self.path)
2146 self.hidden = 1
2148 def _prepareThin(self, attach):
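# Inflate (attach=True) or deflate (attach=False) the LV for thin
# provisioning. Only the pool master manipulates the LV directly; a slave
# asks the master to do it through the lvhd-thin plugin and then refreshes
# its local view of the LV. Any resulting size change is written back to the
# XAPI VDI and SR records.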
2149 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2150 if self.sr.isMaster:
2151 # the master can prepare the VDI locally
2152 if attach:
2153 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
2154 else:
2155 lvhdutil.detachThin(self.session, self.sr.lvmCache,
2156 self.sr.uuid, self.uuid)
2157 else:
2158 fn = "attach"
2159 if not attach:
2160 fn = "detach"
2161 pools = self.session.xenapi.pool.get_all()
2162 master = self.session.xenapi.pool.get_master(pools[0])
2163 rv = self.session.xenapi.host.call_plugin(
2164 master, self.sr.THIN_PLUGIN, fn,
2165 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
2166 util.SMlog("call-plugin returned: %s" % rv)
2167 if not rv:
2168 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2169 # refresh to pick up the size change on this slave
2170 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2172 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2173 if origUtilisation != self.utilisation:
2174 vdi_ref = self.sr.srcmd.params['vdi_ref']
2175 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2176 str(self.utilisation))
2177 stats = lvutil._getVGstats(self.sr.vgname)
2178 sr_utilisation = stats['physical_utilisation']
2179 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2180 str(sr_utilisation))
2182 def update(self, sr_uuid, vdi_uuid):
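# Push the current XAPI name-label, description, snapshot-time and
# metadata-of-pool fields for this VDI into the MGT metadata volume so the
# on-disk metadata stays in sync with XAPI (no-op in legacy mode).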
2183 if self.sr.legacyMode:
2184 return
2186 # Sync the name_label of this VDI on storage with the name_label in XAPI
2187 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2188 update_map = {}
2189 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2190 METADATA_OBJECT_TYPE_VDI
2191 update_map[UUID_TAG] = self.uuid
2192 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2193 self.session.xenapi.VDI.get_name_label(vdi_ref))
2194 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2195 self.session.xenapi.VDI.get_name_description(vdi_ref))
2196 update_map[SNAPSHOT_TIME_TAG] = \
2197 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2198 update_map[METADATA_OF_POOL_TAG] = \
2199 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2200 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2202 def _ensure_cbt_space(self):
2203 self.sr.ensureCBTSpace()
2205 def _create_cbt_log(self):
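# The CBT log lives in its own small LV (sized like a journal LV and tagged
# with CBTLOG_TAG); create it, let the generic implementation initialise the
# log file, then deactivate the LV again.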
2206 logname = self._get_cbt_logname(self.uuid)
2207 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
2208 logpath = super(LVHDVDI, self)._create_cbt_log()
2209 self.sr.lvmCache.deactivateNoRefcount(logname)
2210 return logpath
2212 def _delete_cbt_log(self):
2213 logpath = self._get_cbt_logpath(self.uuid)
2214 if self._cbt_log_exists(logpath):
2215 logname = self._get_cbt_logname(self.uuid)
2216 self.sr.lvmCache.remove(logname)
2218 def _rename(self, oldpath, newpath):
2219 oldname = os.path.basename(oldpath)
2220 newname = os.path.basename(newpath)
2221 self.sr.lvmCache.rename(oldname, newname)
2223 def _activate_cbt_log(self, lv_name):
2224 self.sr.lvmCache.refresh()
2225 if not self.sr.lvmCache.is_active(lv_name):
2226 try:
2227 self.sr.lvmCache.activateNoRefcount(lv_name)
2228 return True
2229 except Exception as e:
2230 util.SMlog("Exception in _activate_cbt_log, "
2231 "Error: %s." % str(e))
2232 raise
2233 else:
2234 return False
2236 def _deactivate_cbt_log(self, lv_name):
2237 try:
2238 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2239 except Exception as e:
2240 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2241 raise
2243 def _cbt_log_exists(self, logpath):
2244 return lvutil.exists(logpath)
2246if __name__ == '__main__':
2247 SRCommand.run(LVHDSR, DRIVER_INFO)
2248else:
2249 SR.registerSR(LVHDSR)