Coverage for drivers/LVHDSR.py: 44%

1#!/usr/bin/python3
2#
3# Copyright (C) Citrix Systems Inc.
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License as published
7# by the Free Software Foundation; version 2.1 only.
8#
9# This program is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with this program; if not, write to the Free Software Foundation, Inc.,
16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17#
18# LVHDSR: VHD on LVM storage repository
19#
21import SR
22from SR import deviceCheck
23import VDI
24import SRCommand
25import util
26import lvutil
27import lvmcache
28import vhdutil
29import lvhdutil
30import scsiutil
31import os
32import sys
33import time
34import errno
35import xs_errors
36import cleanup
37import blktap2
38from journaler import Journaler
39from lock import Lock
40from refcounter import RefCounter
41from ipc import IPCFlag
42from lvmanager import LVActivator
43import XenAPI # pylint: disable=import-error
44import re
45from srmetadata import ALLOCATION_TAG, NAME_LABEL_TAG, NAME_DESCRIPTION_TAG, \
46 UUID_TAG, IS_A_SNAPSHOT_TAG, SNAPSHOT_OF_TAG, TYPE_TAG, VDI_TYPE_TAG, \
47 READ_ONLY_TAG, MANAGED_TAG, SNAPSHOT_TIME_TAG, METADATA_OF_POOL_TAG, \
48 LVMMetadataHandler, METADATA_OBJECT_TYPE_VDI, \
49 METADATA_OBJECT_TYPE_SR, METADATA_UPDATE_OBJECT_TYPE_TAG
50from metadata import retrieveXMLfromFile, _parseXML
51from xmlrpc.client import DateTime
52import glob
53from constants import CBTLOG_TAG
54from fairlock import Fairlock
55DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
57geneology = {}
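# geneology: parent VDI uuid -> list of child VDI uuids, populated by _loadvdis() during a scan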
58CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_TRIM",
59 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_MIRROR",
60 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
61 "VDI_RESET_ON_BOOT/2", "VDI_UPDATE", "VDI_CONFIG_CBT",
62 "VDI_ACTIVATE", "VDI_DEACTIVATE"]
64CONFIGURATION = [['device', 'local device path (required) (e.g. /dev/sda3)']]
66DRIVER_INFO = {
67 'name': 'Local VHD on LVM',
68 'description': 'SR plugin which represents disks as VHD disks on ' + \
69 'Logical Volumes within a locally-attached Volume Group',
70 'vendor': 'XenSource Inc',
71 'copyright': '(C) 2008 XenSource Inc',
72 'driver_version': '1.0',
73 'required_api_version': '1.0',
74 'capabilities': CAPABILITIES,
75 'configuration': CONFIGURATION
76 }
78PARAM_VHD = "vhd"
79PARAM_RAW = "raw"
81OPS_EXCLUSIVE = [
82 "sr_create", "sr_delete", "sr_attach", "sr_detach", "sr_scan",
83 "sr_update", "vdi_create", "vdi_delete", "vdi_resize", "vdi_snapshot",
84 "vdi_clone"]
86# Log if snapshot pauses VM for more than this many seconds
87LONG_SNAPTIME = 60
89class LVHDSR(SR.SR):
90 DRIVER_TYPE = 'lvhd'
92 PROVISIONING_TYPES = ["thin", "thick"]
93 PROVISIONING_DEFAULT = "thick"
94 THIN_PLUGIN = "lvhd-thin"
96 PLUGIN_ON_SLAVE = "on-slave"
98 FLAG_USE_VHD = "use_vhd"
99 MDVOLUME_NAME = "MGT"
101 ALLOCATION_QUANTUM = "allocation_quantum"
102 INITIAL_ALLOCATION = "initial_allocation"
104 LOCK_RETRY_INTERVAL = 3
105 LOCK_RETRY_ATTEMPTS = 10
107 TEST_MODE_KEY = "testmode"
108 TEST_MODE_VHD_FAIL_REPARENT_BEGIN = "vhd_fail_reparent_begin"
109 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR = "vhd_fail_reparent_locator"
110 TEST_MODE_VHD_FAIL_REPARENT_END = "vhd_fail_reparent_end"
111 TEST_MODE_VHD_FAIL_RESIZE_BEGIN = "vhd_fail_resize_begin"
112 TEST_MODE_VHD_FAIL_RESIZE_DATA = "vhd_fail_resize_data"
113 TEST_MODE_VHD_FAIL_RESIZE_METADATA = "vhd_fail_resize_metadata"
114 TEST_MODE_VHD_FAIL_RESIZE_END = "vhd_fail_resize_end"
116 ENV_VAR_VHD_TEST = {
117 TEST_MODE_VHD_FAIL_REPARENT_BEGIN:
118 "VHD_UTIL_TEST_FAIL_REPARENT_BEGIN",
119 TEST_MODE_VHD_FAIL_REPARENT_LOCATOR:
120 "VHD_UTIL_TEST_FAIL_REPARENT_LOCATOR",
121 TEST_MODE_VHD_FAIL_REPARENT_END:
122 "VHD_UTIL_TEST_FAIL_REPARENT_END",
123 TEST_MODE_VHD_FAIL_RESIZE_BEGIN:
124 "VHD_UTIL_TEST_FAIL_RESIZE_BEGIN",
125 TEST_MODE_VHD_FAIL_RESIZE_DATA:
126 "VHD_UTIL_TEST_FAIL_RESIZE_DATA_MOVED",
127 TEST_MODE_VHD_FAIL_RESIZE_METADATA:
128 "VHD_UTIL_TEST_FAIL_RESIZE_METADATA_MOVED",
129 TEST_MODE_VHD_FAIL_RESIZE_END:
130 "VHD_UTIL_TEST_FAIL_RESIZE_END"
131 }
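# Each test mode maps to an environment variable which presumably enables the matching
# failure-injection hook in vhd-util; _prepareTestMode() below simply exports that variable.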
132 testMode = ""
134 legacyMode = True
136 def handles(type):
137 """Returns True if this SR class understands the given dconf string"""
138 # we can pose as LVMSR or EXTSR for compatibility purposes
139 if __name__ == '__main__':
140 name = sys.argv[0]
141 else:
142 name = __name__
143 if name.endswith("LVMSR"):
144 return type == "lvm"
145 elif name.endswith("EXTSR"):
146 return type == "ext"
147 return type == LVHDSR.DRIVER_TYPE
148 handles = staticmethod(handles)
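# Illustrative dispatch (assuming this module is also exposed under the LVMSR/EXTSR names):
# a module ending in "LVMSR" handles type 'lvm', one ending in "EXTSR" handles 'ext',
# and LVHDSR itself handles 'lvhd'.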
150 def load(self, sr_uuid):
151 self.ops_exclusive = OPS_EXCLUSIVE
153 self.isMaster = False
154 if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
155 self.isMaster = True
157 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
158 self.sr_vditype = SR.DEFAULT_TAP
159 self.uuid = sr_uuid
160 self.vgname = lvhdutil.VG_PREFIX + self.uuid
161 self.path = os.path.join(lvhdutil.VG_LOCATION, self.vgname)
162 self.mdpath = os.path.join(self.path, self.MDVOLUME_NAME)
163 self.provision = self.PROVISIONING_DEFAULT
165 self.other_conf = None
166 has_sr_ref = self.srcmd.params.get("sr_ref")
167 if has_sr_ref:
168 self.other_conf = self.session.xenapi.SR.get_other_config(self.sr_ref)
170 self.lvm_conf = None
171 if self.other_conf:
172 self.lvm_conf = self.other_conf.get('lvm-conf')
174 try:
175 self.lvmCache = lvmcache.LVMCache(self.vgname, self.lvm_conf)
176 except:
177 raise xs_errors.XenError('SRUnavailable', \
178 opterr='Failed to initialise the LVMCache')
179 self.lvActivator = LVActivator(self.uuid, self.lvmCache)
180 self.journaler = Journaler(self.lvmCache)
181 if not has_sr_ref:
182 return # must be a probe call
183 # Test for thick vs thin provisioning conf parameter
184 if 'allocation' in self.dconf:  # 184 ↛ 185: condition was never true
185 if self.dconf['allocation'] in self.PROVISIONING_TYPES:
186 self.provision = self.dconf['allocation']
187 else:
188 raise xs_errors.XenError('InvalidArg', \
189 opterr='Allocation parameter must be one of %s' % self.PROVISIONING_TYPES)
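# Example device-config (illustrative values only): device=/dev/sda3, allocation=thin
# 'allocation' must be one of PROVISIONING_TYPES ("thin"/"thick"), otherwise InvalidArg is raised.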
191 if self.other_conf.get(self.TEST_MODE_KEY):  # 191 ↛ 195: condition was never false
192 self.testMode = self.other_conf[self.TEST_MODE_KEY]
193 self._prepareTestMode()
195 self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
196 # sm_config flag overrides PBD, if any
197 if self.sm_config.get('allocation') in self.PROVISIONING_TYPES:
198 self.provision = self.sm_config.get('allocation')
200 if self.sm_config.get(self.FLAG_USE_VHD) == "true":
201 self.legacyMode = False
203 if lvutil._checkVG(self.vgname):
204 if self.isMaster and not self.cmd in ["vdi_attach", "vdi_detach",  # 204 ↛ 207: condition was never false
205 "vdi_activate", "vdi_deactivate"]:
206 self._undoAllJournals()
207 if not self.cmd in ["sr_attach", "sr_probe"]:
208 self._checkMetadataVolume()
210 self.mdexists = False
212 # get a VDI -> TYPE map from the storage
213 contains_uuid_regex = \
214 re.compile("^.*[0-9a-f]{8}-(([0-9a-f]{4})-){3}[0-9a-f]{12}.*")
215 self.storageVDIs = {}
217 for key in self.lvmCache.lvs.keys():  # 217 ↛ 219: loop never started
218 # if the lvname has a uuid in it
219 type = None
220 if contains_uuid_regex.search(key) is not None:
221 if key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):
222 type = vhdutil.VDI_TYPE_VHD
223 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
224 elif key.startswith(lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW]):
225 type = vhdutil.VDI_TYPE_RAW
226 vdi = key[len(lvhdutil.LV_PREFIX[type]):]
227 else:
228 continue
230 if type is not None:
231 self.storageVDIs[vdi] = type
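# Resulting shape (illustrative, assuming lvhdutil.LV_PREFIX values such as 'VHD-'/'LV-'):
# an LV named 'VHD-<uuid>' yields self.storageVDIs['<uuid>'] = vhdutil.VDI_TYPE_VHD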
233 # check if metadata volume exists
234 try:
235 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
236 except:
237 pass
239 def cleanup(self):
240 # we don't need to hold the lock to dec refcounts of activated LVs
241 if not self.lvActivator.deactivateAll():  # 241 ↛ 242: condition was never true
242 raise util.SMException("failed to deactivate LVs")
244 def updateSRMetadata(self, allocation):
245 try:
246 # Add SR specific SR metadata
247 sr_info = \
248 {ALLOCATION_TAG: allocation,
249 UUID_TAG: self.uuid,
250 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_label(self.sr_ref)),
251 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.SR.get_name_description(self.sr_ref))
252 }
254 vdi_info = {}
255 for vdi in self.session.xenapi.SR.get_VDIs(self.sr_ref):
256 vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi)
258 # Create the VDI entry in the SR metadata
259 vdi_info[vdi_uuid] = \
260 {
261 UUID_TAG: vdi_uuid,
262 NAME_LABEL_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi)),
263 NAME_DESCRIPTION_TAG: util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi)),
264 IS_A_SNAPSHOT_TAG: \
265 int(self.session.xenapi.VDI.get_is_a_snapshot(vdi)),
266 SNAPSHOT_OF_TAG: \
267 self.session.xenapi.VDI.get_snapshot_of(vdi),
268 SNAPSHOT_TIME_TAG: \
269 self.session.xenapi.VDI.get_snapshot_time(vdi),
270 TYPE_TAG: \
271 self.session.xenapi.VDI.get_type(vdi),
272 VDI_TYPE_TAG: \
273 self.session.xenapi.VDI.get_sm_config(vdi)['vdi_type'],
274 READ_ONLY_TAG: \
275 int(self.session.xenapi.VDI.get_read_only(vdi)),
276 METADATA_OF_POOL_TAG: \
277 self.session.xenapi.VDI.get_metadata_of_pool(vdi),
278 MANAGED_TAG: \
279 int(self.session.xenapi.VDI.get_managed(vdi))
280 }
281 LVMMetadataHandler(self.mdpath).writeMetadata(sr_info, vdi_info)
283 except Exception as e:
284 raise xs_errors.XenError('MetadataError', \
285 opterr='Error upgrading SR Metadata: %s' % str(e))
287 def syncMetadataAndStorage(self):
288 try:
289 # if a VDI is present in the metadata but not in the storage
290 # then delete it from the metadata
291 vdi_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
292 for vdi in list(vdi_info.keys()):
293 update_map = {}
294 if not vdi_info[vdi][UUID_TAG] in set(self.storageVDIs.keys()):  # 294 ↛ 301: condition was never false
295 # delete this from metadata
296 LVMMetadataHandler(self.mdpath). \
297 deleteVdiFromMetadata(vdi_info[vdi][UUID_TAG])
298 else:
299 # search for this in the metadata, compare types
300 # self.storageVDIs is a map of vdi_uuid to vdi_type
301 if vdi_info[vdi][VDI_TYPE_TAG] != \
302 self.storageVDIs[vdi_info[vdi][UUID_TAG]]:
303 # storage type takes authority
304 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] \
305 = METADATA_OBJECT_TYPE_VDI
306 update_map[UUID_TAG] = vdi_info[vdi][UUID_TAG]
307 update_map[VDI_TYPE_TAG] = \
308 self.storageVDIs[vdi_info[vdi][UUID_TAG]]
309 LVMMetadataHandler(self.mdpath) \
310 .updateMetadata(update_map)
311 else:
312 # This should never happen
313 pass
315 except Exception as e:
316 raise xs_errors.XenError('MetadataError', \
317 opterr='Error synching SR Metadata and storage: %s' % str(e))
319 def syncMetadataAndXapi(self):
320 try:
321 # get metadata
322 (sr_info, vdi_info) = \
323 LVMMetadataHandler(self.mdpath, False).getMetadata()
325 # First synch SR parameters
326 self.update(self.uuid)
328 # Now update the VDI information in the metadata if required
329 for vdi_offset in vdi_info.keys():
330 try:
331 vdi_ref = \
332 self.session.xenapi.VDI.get_by_uuid( \
333 vdi_info[vdi_offset][UUID_TAG])
334 except:
335 # maybe the VDI is not in XAPI yet, don't bother
336 continue
338 new_name_label = util.to_plain_string(self.session.xenapi.VDI.get_name_label(vdi_ref))
339 new_name_description = util.to_plain_string(self.session.xenapi.VDI.get_name_description(vdi_ref))
341 if vdi_info[vdi_offset][NAME_LABEL_TAG] != new_name_label or \
342 vdi_info[vdi_offset][NAME_DESCRIPTION_TAG] != \
343 new_name_description:
344 update_map = {}
345 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
346 METADATA_OBJECT_TYPE_VDI
347 update_map[UUID_TAG] = vdi_info[vdi_offset][UUID_TAG]
348 update_map[NAME_LABEL_TAG] = new_name_label
349 update_map[NAME_DESCRIPTION_TAG] = new_name_description
350 LVMMetadataHandler(self.mdpath) \
351 .updateMetadata(update_map)
352 except Exception as e:
353 raise xs_errors.XenError('MetadataError', \
354 opterr='Error synching SR Metadata and XAPI: %s' % str(e))
356 def _checkMetadataVolume(self):
357 util.SMlog("Entering _checkMetadataVolume")
358 self.mdexists = self.lvmCache.checkLV(self.MDVOLUME_NAME)
359 if self.isMaster:  # 359 ↛ 375: condition was never false
360 if self.mdexists and self.cmd == "sr_attach":
361 try:
362 # activate the management volume
363 # will be deactivated at detach time
364 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
365 self._synchSmConfigWithMetaData()
366 util.SMlog("Sync SR metadata and the state on the storage.")
367 self.syncMetadataAndStorage()
368 self.syncMetadataAndXapi()
369 except Exception as e:
370 util.SMlog("Exception in _checkMetadataVolume, " \
371 "Error: %s." % str(e))
372 elif not self.mdexists and not self.legacyMode:  # 372 ↛ 375: condition was never false
373 self._introduceMetaDataVolume()
375 if self.mdexists:
376 self.legacyMode = False
378 def _synchSmConfigWithMetaData(self):
379 util.SMlog("Synching sm-config with metadata volume")
381 try:
382 # get SR info from metadata
383 sr_info = {}
384 map = {}
385 sr_info = LVMMetadataHandler(self.mdpath, False).getMetadata()[0]
387 if sr_info == {}:  # 387 ↛ 388: condition was never true
388 raise Exception("Failed to get SR information from metadata.")
390 if "allocation" in sr_info:  # 390 ↛ 394: condition was never false
391 self.provision = sr_info.get("allocation")
392 map['allocation'] = sr_info.get("allocation")
393 else:
394 raise Exception("Allocation key not found in SR metadata. "
395 "SR info found: %s" % sr_info)
397 except Exception as e:
398 raise xs_errors.XenError(
399 'MetadataError',
400 opterr='Error reading SR params from '
401 'metadata Volume: %s' % str(e))
402 try:
403 map[self.FLAG_USE_VHD] = 'true'
404 self.session.xenapi.SR.set_sm_config(self.sr_ref, map)
405 except:
406 raise xs_errors.XenError(
407 'MetadataError',
408 opterr='Error updating sm_config key')
410 def _introduceMetaDataVolume(self):
411 util.SMlog("Creating Metadata volume")
412 try:
413 config = {}
414 self.lvmCache.create(self.MDVOLUME_NAME, 4 * 1024 * 1024)
416 # activate the management volume, will be deactivated at detach time
417 self.lvmCache.activateNoRefcount(self.MDVOLUME_NAME)
419 name_label = util.to_plain_string( \
420 self.session.xenapi.SR.get_name_label(self.sr_ref))
421 name_description = util.to_plain_string( \
422 self.session.xenapi.SR.get_name_description(self.sr_ref))
423 config[self.FLAG_USE_VHD] = "true"
424 config['allocation'] = self.provision
425 self.session.xenapi.SR.set_sm_config(self.sr_ref, config)
427 # Add the SR metadata
428 self.updateSRMetadata(self.provision)
429 except Exception as e:
430 raise xs_errors.XenError('MetadataError', \
431 opterr='Error introducing Metadata Volume: %s' % str(e))
433 def _removeMetadataVolume(self):
434 if self.mdexists:
435 try:
436 self.lvmCache.remove(self.MDVOLUME_NAME)
437 except:
438 raise xs_errors.XenError('MetadataError', \
439 opterr='Failed to delete MGT Volume')
441 def _refresh_size(self):
442 """
443 Refreshes the size of the backing device.
444 Returns True if all paths/devices agree on the same size.
445 """
446 if hasattr(self, 'SCSIid'):  # 446 ↛ 448: condition was never true
447 # LVHDoHBASR, LVHDoISCSISR
448 return scsiutil.refresh_lun_size_by_SCSIid(getattr(self, 'SCSIid'))
449 else:
450 # LVHDSR
451 devices = self.dconf['device'].split(',')
452 scsiutil.refreshdev(devices)
453 return True
455 def _expand_size(self):
456 """
457 Expands the size of the SR by growing into additional available
458 space, if extra space is available on the backing device.
459 Needs to be called after a successful call of _refresh_size.
460 """
461 currentvgsize = lvutil._getVGstats(self.vgname)['physical_size']
462 # We are comparing PV- with VG-sizes that are aligned. Need a threshold
463 resizethreshold = 100 * 1024 * 1024 # 100MB
464 devices = self.dconf['device'].split(',')
465 totaldevicesize = 0
466 for device in devices:
467 totaldevicesize = totaldevicesize + scsiutil.getsize(device)
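# Worked example (illustrative numbers): if the VG currently reports 99 GiB but the devices
# now total 100 GiB, then 100 GiB >= 99 GiB + 100 MiB, so the PVs below are resized to grow the VG.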
468 if totaldevicesize >= (currentvgsize + resizethreshold):
469 try:
470 if hasattr(self, 'SCSIid'):  # 470 ↛ 472: condition was never true
471 # LVHDoHBASR, LVHDoISCSISR might have slaves
472 scsiutil.refresh_lun_size_by_SCSIid_on_slaves(self.session,
473 getattr(self, 'SCSIid'))
474 util.SMlog("LVHDSR._expand_size for %s will resize the pv." %
475 self.uuid)
476 for pv in lvutil.get_pv_for_vg(self.vgname):
477 lvutil.resizePV(pv)
478 except:
479 util.logException("LVHDSR._expand_size for %s failed to resize"
480 " the PV" % self.uuid)
482 @deviceCheck
483 def create(self, uuid, size):
484 util.SMlog("LVHDSR.create for %s" % self.uuid)
485 if not self.isMaster:
486 util.SMlog('sr_create blocked for non-master')
487 raise xs_errors.XenError('LVMMaster')
489 if lvutil._checkVG(self.vgname):
490 raise xs_errors.XenError('SRExists')
492 # Check none of the devices already in use by other PBDs
493 if util.test_hostPBD_devs(self.session, uuid, self.dconf['device']):
494 raise xs_errors.XenError('SRInUse')
496 # Check serial number entry in SR records
497 for dev in self.dconf['device'].split(','):
498 if util.test_scsiserial(self.session, dev):
499 raise xs_errors.XenError('SRInUse')
501 lvutil.createVG(self.dconf['device'], self.vgname)
503 #Update serial number string
504 scsiutil.add_serial_record(self.session, self.sr_ref, \
505 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
507 # since this is an SR.create turn off legacy mode
508 self.session.xenapi.SR.add_to_sm_config(self.sr_ref, \
509 self.FLAG_USE_VHD, 'true')
511 def delete(self, uuid):
512 util.SMlog("LVHDSR.delete for %s" % self.uuid)
513 if not self.isMaster:
514 raise xs_errors.XenError('LVMMaster')
515 cleanup.gc_force(self.session, self.uuid)
517 success = True
518 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
519 if util.extractSRFromDevMapper(fileName) != self.uuid:
520 continue
522 if util.doesFileHaveOpenHandles(fileName):
523 util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
524 "handles" % fileName)
525 success = False
526 continue
528 # Now attempt to remove the dev mapper entry
529 if not lvutil.removeDevMapperEntry(fileName, False):
530 success = False
531 continue
533 try:
534 lvname = os.path.basename(fileName.replace('-', '/'). \
535 replace('//', '-'))
536 lpath = os.path.join(self.path, lvname)
537 os.unlink(lpath)
538 except OSError as e:
539 if e.errno != errno.ENOENT:
540 util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
541 "file %s. Error: %s" % (fileName, str(e)))
542 success = False
544 if success:
545 try:
546 if util.pathexists(self.path):
547 os.rmdir(self.path)
548 except Exception as e:
549 util.SMlog("LVHDSR.delete: failed to remove the symlink " \
550 "directory %s. Error: %s" % (self.path, str(e)))
551 success = False
553 self._removeMetadataVolume()
554 self.lvmCache.refresh()
555 if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
556 raise xs_errors.XenError('SRNotEmpty')
558 if not success:
559 raise Exception("LVHDSR delete failed, please refer to the log " \
560 "for details.")
562 lvutil.removeVG(self.dconf['device'], self.vgname)
563 self._cleanup()
565 def attach(self, uuid):
566 util.SMlog("LVHDSR.attach for %s" % self.uuid)
568 self._cleanup(True) # in case of host crashes, if detach wasn't called
570 if not util.match_uuid(self.uuid) or not lvutil._checkVG(self.vgname):  # 570 ↛ 571: condition was never true
571 raise xs_errors.XenError('SRUnavailable', \
572 opterr='no such volume group: %s' % self.vgname)
574 # Refresh the metadata status
575 self._checkMetadataVolume()
577 refreshsizeok = self._refresh_size()
579 if self.isMaster:  # 579 ↛ 590: condition was never false
580 if refreshsizeok:  # 580 ↛ 584: condition was never false
581 self._expand_size()
583 # Update SCSIid string
584 util.SMlog("Calling devlist_to_serial")
585 scsiutil.add_serial_record(
586 self.session, self.sr_ref,
587 scsiutil.devlist_to_serialstring(self.dconf['device'].split(',')))
589 # Test Legacy Mode Flag and update if VHD volumes exist
590 if self.isMaster and self.legacyMode:  # 590 ↛ 591: condition was never true
591 vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
592 for uuid, info in vdiInfo.items():
593 if info.vdiType == vhdutil.VDI_TYPE_VHD:
594 self.legacyMode = False
595 map = self.session.xenapi.SR.get_sm_config(self.sr_ref)
596 self._introduceMetaDataVolume()
597 break
599 # Set the block scheduler
600 for dev in self.dconf['device'].split(','):
601 self.block_setscheduler(dev)
603 def detach(self, uuid):
604 util.SMlog("LVHDSR.detach for %s" % self.uuid)
605 cleanup.abort(self.uuid)
607 # Do a best effort cleanup of the dev mapper entries
608 # go through all devmapper entries for this VG
609 success = True
610 for fileName in glob.glob(DEV_MAPPER_ROOT + '*'):
611 if util.extractSRFromDevMapper(fileName) != self.uuid:  # 611 ↛ 612: condition was never true
612 continue
614 with Fairlock('devicemapper'):
615 # check if any file has open handles
616 if util.doesFileHaveOpenHandles(fileName):
617 # if yes, log this and signal failure
618 util.SMlog(
619 f"LVHDSR.detach: The dev mapper entry {fileName} has "
620 "open handles")
621 success = False
622 continue
624 # Now attempt to remove the dev mapper entry
625 if not lvutil.removeDevMapperEntry(fileName, False):  # 625 ↛ 626: condition was never true
626 success = False
627 continue
629 # also remove the symlinks from /dev/VG-XenStorage-SRUUID/*
630 try:
631 lvname = os.path.basename(fileName.replace('-', '/'). \
632 replace('//', '-'))
633 lvname = os.path.join(self.path, lvname)
634 util.force_unlink(lvname)
635 except Exception as e:
636 util.SMlog("LVHDSR.detach: failed to remove the symlink for " \
637 "file %s. Error: %s" % (fileName, str(e)))
638 success = False
640 # now remove the directory where the symlinks are
641 # this should pass as the directory should be empty by now
642 if success:
643 try:
644 if util.pathexists(self.path):  # 644 ↛ 645: condition was never true
645 os.rmdir(self.path)
646 except Exception as e:
647 util.SMlog("LVHDSR.detach: failed to remove the symlink " \
648 "directory %s. Error: %s" % (self.path, str(e)))
649 success = False
651 if not success:
652 raise Exception("SR detach failed, please refer to the log " \
653 "for details.")
655 # Don't delete lock files on the master as it will break the locking
656 # between SM and any GC thread that survives through SR.detach.
657 # However, we should still delete lock files on slaves as it is the
658 # only place to do so.
659 self._cleanup(self.isMaster)
661 def forget_vdi(self, uuid):
662 if not self.legacyMode:
663 LVMMetadataHandler(self.mdpath).deleteVdiFromMetadata(uuid)
664 super(LVHDSR, self).forget_vdi(uuid)
666 def scan(self, uuid):
667 activated = True
668 try:
669 lvname = ''
670 util.SMlog("LVHDSR.scan for %s" % self.uuid)
671 if not self.isMaster:  # 671 ↛ 672: condition was never true
672 util.SMlog('sr_scan blocked for non-master')
673 raise xs_errors.XenError('LVMMaster')
675 if self._refresh_size():  # 675 ↛ 677: condition was never false
676 self._expand_size()
677 self.lvmCache.refresh()
678 cbt_vdis = self.lvmCache.getTagged(CBTLOG_TAG)
679 self._loadvdis()
680 stats = lvutil._getVGstats(self.vgname)
681 self.physical_size = stats['physical_size']
682 self.physical_utilisation = stats['physical_utilisation']
684 # Now check if there are any VDIs in the metadata, which are not in
685 # XAPI
686 if self.mdexists:  # 686 ↛ 796: condition was never false
687 vdiToSnaps = {}
688 # get VDIs from XAPI
689 vdis = self.session.xenapi.SR.get_VDIs(self.sr_ref)
690 vdi_uuids = set([])
691 for vdi in vdis:
692 vdi_uuids.add(self.session.xenapi.VDI.get_uuid(vdi))
694 Dict = LVMMetadataHandler(self.mdpath, False).getMetadata()[1]
696 for vdi in list(Dict.keys()):
697 vdi_uuid = Dict[vdi][UUID_TAG]
698 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):  # 698 ↛ 699: condition was never true
699 if Dict[vdi][SNAPSHOT_OF_TAG] in vdiToSnaps:
700 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]].append(vdi_uuid)
701 else:
702 vdiToSnaps[Dict[vdi][SNAPSHOT_OF_TAG]] = [vdi_uuid]
704 if vdi_uuid not in vdi_uuids:  # 704 ↛ 705: condition was never true
705 util.SMlog("Introduce VDI %s as it is present in " \
706 "metadata and not in XAPI." % vdi_uuid)
707 sm_config = {}
708 sm_config['vdi_type'] = Dict[vdi][VDI_TYPE_TAG]
709 lvname = "%s%s" % \
710 (lvhdutil.LV_PREFIX[sm_config['vdi_type']], vdi_uuid)
711 self.lvmCache.activateNoRefcount(lvname)
712 activated = True
713 lvPath = os.path.join(self.path, lvname)
715 if Dict[vdi][VDI_TYPE_TAG] == vhdutil.VDI_TYPE_RAW:
716 size = self.lvmCache.getSize( \
717 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_RAW] + \
718 vdi_uuid)
719 utilisation = \
720 util.roundup(lvutil.LVM_SIZE_INCREMENT,
721 int(size))
722 else:
723 parent = \
724 vhdutil._getVHDParentNoCheck(lvPath)
726 if parent is not None:
727 sm_config['vhd-parent'] = parent[len( \
728 lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD]):]
729 size = vhdutil.getSizeVirt(lvPath)
730 if self.provision == "thin":
731 utilisation = \
732 util.roundup(lvutil.LVM_SIZE_INCREMENT,
733 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
734 else:
735 utilisation = lvhdutil.calcSizeVHDLV(int(size))
737 vdi_ref = self.session.xenapi.VDI.db_introduce(
738 vdi_uuid,
739 Dict[vdi][NAME_LABEL_TAG],
740 Dict[vdi][NAME_DESCRIPTION_TAG],
741 self.sr_ref,
742 Dict[vdi][TYPE_TAG],
743 False,
744 bool(int(Dict[vdi][READ_ONLY_TAG])),
745 {},
746 vdi_uuid,
747 {},
748 sm_config)
750 self.session.xenapi.VDI.set_managed(vdi_ref,
751 bool(int(Dict[vdi][MANAGED_TAG])))
752 self.session.xenapi.VDI.set_virtual_size(vdi_ref,
753 str(size))
754 self.session.xenapi.VDI.set_physical_utilisation( \
755 vdi_ref, str(utilisation))
756 self.session.xenapi.VDI.set_is_a_snapshot( \
757 vdi_ref, bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])))
758 if bool(int(Dict[vdi][IS_A_SNAPSHOT_TAG])):
759 self.session.xenapi.VDI.set_snapshot_time( \
760 vdi_ref, DateTime(Dict[vdi][SNAPSHOT_TIME_TAG]))
761 if Dict[vdi][TYPE_TAG] == 'metadata':
762 self.session.xenapi.VDI.set_metadata_of_pool( \
763 vdi_ref, Dict[vdi][METADATA_OF_POOL_TAG])
765 # Update CBT status of disks either just added
766 # or already in XAPI
767 cbt_logname = "%s.%s" % (vdi_uuid, CBTLOG_TAG)
768 if cbt_logname in cbt_vdis:  # 768 ↛ 769: condition was never true
769 vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
770 self.session.xenapi.VDI.set_cbt_enabled(vdi_ref, True)
771 # For existing VDIs, update local state too
772 # Scan in base class SR updates existing VDIs
773 # again based on local states
774 if vdi_uuid in self.vdis:
775 self.vdis[vdi_uuid].cbt_enabled = True
776 cbt_vdis.remove(cbt_logname)
778 # Now set the snapshot statuses correctly in XAPI
779 for srcvdi in vdiToSnaps.keys():  # 779 ↛ 780: loop never started
780 try:
781 srcref = self.session.xenapi.VDI.get_by_uuid(srcvdi)
782 except:
783 # the source VDI no longer exists, continue
784 continue
786 for snapvdi in vdiToSnaps[srcvdi]:
787 try:
788 # this might fail in cases where it's already set
789 snapref = \
790 self.session.xenapi.VDI.get_by_uuid(snapvdi)
791 self.session.xenapi.VDI.set_snapshot_of(snapref, srcref)
792 except Exception as e:
793 util.SMlog("Setting snapshot failed. " \
794 "Error: %s" % str(e))
796 if cbt_vdis:  # 796 ↛ 807: condition was never false
797 # If we have items remaining in this list,
798 # they are cbt_metadata VDI that XAPI doesn't know about
799 # Add them to self.vdis and they'll get added to the DB
800 for cbt_vdi in cbt_vdis:  # 800 ↛ 801: loop never started
801 cbt_uuid = cbt_vdi.split(".")[0]
802 new_vdi = self.vdi(cbt_uuid)
803 new_vdi.ty = "cbt_metadata"
804 new_vdi.cbt_enabled = True
805 self.vdis[cbt_uuid] = new_vdi
807 super(LVHDSR, self).scan(uuid)
808 self._kickGC()
810 finally:
811 if lvname != '' and activated:  # 811 ↛ 812: condition was never true
812 self.lvmCache.deactivateNoRefcount(lvname)
814 def update(self, uuid):
815 if not lvutil._checkVG(self.vgname):  # 815 ↛ 816: condition was never true
816 return
817 self._updateStats(uuid, 0)
819 if self.legacyMode:  # 819 ↛ 820: condition was never true
820 return
822 # synch name_label in metadata with XAPI
823 update_map = {}
824 update_map = {METADATA_UPDATE_OBJECT_TYPE_TAG: \
825 METADATA_OBJECT_TYPE_SR,
826 NAME_LABEL_TAG: util.to_plain_string( \
827 self.session.xenapi.SR.get_name_label(self.sr_ref)),
828 NAME_DESCRIPTION_TAG: util.to_plain_string( \
829 self.session.xenapi.SR.get_name_description(self.sr_ref))
830 }
831 LVMMetadataHandler(self.mdpath).updateMetadata(update_map)
833 def _updateStats(self, uuid, virtAllocDelta):
834 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref))
835 self.virtual_allocation = valloc + virtAllocDelta
836 util.SMlog("Setting virtual_allocation of SR %s to %d" %
837 (uuid, self.virtual_allocation))
838 stats = lvutil._getVGstats(self.vgname)
839 self.physical_size = stats['physical_size']
840 self.physical_utilisation = stats['physical_utilisation']
841 self._db_update()
843 @deviceCheck
844 def probe(self):
845 return lvutil.srlist_toxml(
846 lvutil.scan_srlist(lvhdutil.VG_PREFIX, self.dconf['device']),
847 lvhdutil.VG_PREFIX,
848 ('metadata' in self.srcmd.params['sr_sm_config'] and \
849 self.srcmd.params['sr_sm_config']['metadata'] == 'true'))
851 def vdi(self, uuid):
852 return LVHDVDI(self, uuid)
854 def _loadvdis(self):
855 self.virtual_allocation = 0
856 self.vdiInfo = lvhdutil.getVDIInfo(self.lvmCache)
857 self.allVDIs = {}
859 for uuid, info in self.vdiInfo.items():
860 if uuid.startswith(cleanup.SR.TMP_RENAME_PREFIX):  # 860 ↛ 861: condition was never true
861 continue
862 if info.scanError:  # 862 ↛ 863: condition was never true
863 raise xs_errors.XenError('VDIUnavailable', \
864 opterr='Error scanning VDI %s' % uuid)
865 self.vdis[uuid] = self.allVDIs[uuid] = self.vdi(uuid)
866 if not self.vdis[uuid].hidden:  # 866 ↛ 859: condition was never false
867 self.virtual_allocation += self.vdis[uuid].utilisation
869 for uuid, vdi in self.vdis.items():
870 if vdi.parent:  # 870 ↛ 871: condition was never true
871 if vdi.parent in self.vdis:
872 self.vdis[vdi.parent].read_only = True
873 if vdi.parent in geneology:
874 geneology[vdi.parent].append(uuid)
875 else:
876 geneology[vdi.parent] = [uuid]
878 # Now remove all hidden leaf nodes to avoid introducing records that
879 # will be GC'ed
880 for uuid in list(self.vdis.keys()):
881 if uuid not in geneology and self.vdis[uuid].hidden:  # 881 ↛ 882: condition was never true
882 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid)
883 del self.vdis[uuid]
885 def _ensureSpaceAvailable(self, amount_needed):
886 space_available = lvutil._getVGstats(self.vgname)['freespace']
887 if (space_available < amount_needed):
888 util.SMlog("Not enough space! free space: %d, need: %d" % \
889 (space_available, amount_needed))
890 raise xs_errors.XenError('SRNoSpace')
892 def _handleInterruptedCloneOps(self):
893 entries = self.journaler.getAll(LVHDVDI.JRN_CLONE)
894 for uuid, val in entries.items():  # 894 ↛ 895: loop never started
895 util.fistpoint.activate("LVHDRT_clone_vdi_before_undo_clone", self.uuid)
896 self._handleInterruptedCloneOp(uuid, val)
897 util.fistpoint.activate("LVHDRT_clone_vdi_after_undo_clone", self.uuid)
898 self.journaler.remove(LVHDVDI.JRN_CLONE, uuid)
900 def _handleInterruptedCoalesceLeaf(self):
901 entries = self.journaler.getAll(cleanup.VDI.JRN_LEAF)
902 if len(entries) > 0:  # 902 ↛ 903: condition was never true
903 util.SMlog("*** INTERRUPTED COALESCE-LEAF OP DETECTED ***")
904 cleanup.gc_force(self.session, self.uuid)
905 self.lvmCache.refresh()
907 def _handleInterruptedCloneOp(self, origUuid, jval, forceUndo=False):
908 """Either roll back or finalize the interrupted snapshot/clone
909 operation. Rolling back is unsafe if the leaf VHDs have already been
910 in use and written to. However, it is always safe to roll back while
911 we're still in the context of the failed snapshot operation since the
912 VBD is paused for the duration of the operation"""
913 util.SMlog("*** INTERRUPTED CLONE OP: for %s (%s)" % (origUuid, jval))
914 lvs = lvhdutil.getLVInfo(self.lvmCache)
915 baseUuid, clonUuid = jval.split("_")
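# The journal value is "<baseUuid>_<clonUuid>"; clonUuid may be empty, which the
# "if clonUuid" checks below treat as "no clone leaf to verify"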
917 # is there a "base copy" VDI?
918 if not lvs.get(baseUuid):
919 # no base copy: make sure the original is there
920 if lvs.get(origUuid):
921 util.SMlog("*** INTERRUPTED CLONE OP: nothing to do")
922 return
923 raise util.SMException("base copy %s not present, " \
924 "but no original %s found" % (baseUuid, origUuid))
926 if forceUndo:
927 util.SMlog("Explicit revert")
928 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
929 return
931 if not lvs.get(origUuid) or (clonUuid and not lvs.get(clonUuid)):
932 util.SMlog("One or both leaves missing => revert")
933 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
934 return
936 vdis = lvhdutil.getVDIInfo(self.lvmCache)
937 if vdis[origUuid].scanError or (clonUuid and vdis[clonUuid].scanError):
938 util.SMlog("One or both leaves invalid => revert")
939 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
940 return
942 orig = vdis[origUuid]
943 base = vdis[baseUuid]
944 self.lvActivator.activate(baseUuid, base.lvName, False)
945 self.lvActivator.activate(origUuid, orig.lvName, False)
946 if orig.parentUuid != baseUuid:
947 parent = vdis[orig.parentUuid]
948 self.lvActivator.activate(parent.uuid, parent.lvName, False)
949 origPath = os.path.join(self.path, orig.lvName)
950 if not vhdutil.check(origPath):
951 util.SMlog("Orig VHD invalid => revert")
952 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
953 return
955 if clonUuid:
956 clon = vdis[clonUuid]
957 clonPath = os.path.join(self.path, clon.lvName)
958 self.lvActivator.activate(clonUuid, clon.lvName, False)
959 if not vhdutil.check(clonPath):
960 util.SMlog("Clon VHD invalid => revert")
961 self._undoCloneOp(lvs, origUuid, baseUuid, clonUuid)
962 return
964 util.SMlog("Snapshot appears valid, will not roll back")
965 self._completeCloneOp(vdis, origUuid, baseUuid, clonUuid)
967 def _undoCloneOp(self, lvs, origUuid, baseUuid, clonUuid):
968 base = lvs[baseUuid]
969 basePath = os.path.join(self.path, base.name)
971 # make the parent RW
972 if base.readonly:
973 self.lvmCache.setReadonly(base.name, False)
975 ns = lvhdutil.NS_PREFIX_LVM + self.uuid
976 origRefcountBinary = RefCounter.check(origUuid, ns)[1]
977 origRefcountNormal = 0
979 # un-hide the parent
980 if base.vdiType == vhdutil.VDI_TYPE_VHD:
981 self.lvActivator.activate(baseUuid, base.name, False)
982 origRefcountNormal = 1
983 vhdInfo = vhdutil.getVHDInfo(basePath, lvhdutil.extractUuid, False)
984 if base.vdiType == vhdutil.VDI_TYPE_VHD and vhdInfo.hidden:
985 vhdutil.setHidden(basePath, False)
986 elif base.vdiType == vhdutil.VDI_TYPE_RAW and base.hidden:
987 self.lvmCache.setHidden(base.name, False)
989 # remove the child nodes
990 if clonUuid and lvs.get(clonUuid):
991 if lvs[clonUuid].vdiType != vhdutil.VDI_TYPE_VHD:
992 raise util.SMException("clone %s not VHD" % clonUuid)
993 self.lvmCache.remove(lvs[clonUuid].name)
994 if self.lvActivator.get(clonUuid, False):
995 self.lvActivator.remove(clonUuid, False)
996 if lvs.get(origUuid):
997 self.lvmCache.remove(lvs[origUuid].name)
999 # inflate the parent to fully-allocated size
1000 if base.vdiType == vhdutil.VDI_TYPE_VHD:
1001 fullSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1002 lvhdutil.inflate(self.journaler, self.uuid, baseUuid, fullSize)
1004 # rename back
1005 origLV = lvhdutil.LV_PREFIX[base.vdiType] + origUuid
1006 self.lvmCache.rename(base.name, origLV)
1007 RefCounter.reset(baseUuid, ns)
1008 if self.lvActivator.get(baseUuid, False):
1009 self.lvActivator.replace(baseUuid, origUuid, origLV, False)
1010 RefCounter.set(origUuid, origRefcountNormal, origRefcountBinary, ns)
1012 # At this stage, tapdisk and SM vdi will be in paused state. Remove
1013 # flag to facilitate vm deactivate
1014 origVdiRef = self.session.xenapi.VDI.get_by_uuid(origUuid)
1015 self.session.xenapi.VDI.remove_from_sm_config(origVdiRef, 'paused')
1017 # update LVM metadata on slaves
1018 slaves = util.get_slaves_attached_on(self.session, [origUuid])
1019 lvhdutil.lvRefreshOnSlaves(self.session, self.uuid, self.vgname,
1020 origLV, origUuid, slaves)
1022 util.SMlog("*** INTERRUPTED CLONE OP: rollback success")
1024 def _completeCloneOp(self, vdis, origUuid, baseUuid, clonUuid):
1025 """Finalize the interrupted snapshot/clone operation. This must not be
1026 called from the live snapshot op context because we attempt to pause/
1027 unpause the VBD here (the VBD is already paused during snapshot, so it
1028 would cause a deadlock)"""
1029 base = vdis[baseUuid]
1030 clon = None
1031 if clonUuid:
1032 clon = vdis[clonUuid]
1034 cleanup.abort(self.uuid)
1036 # make sure the parent is hidden and read-only
1037 if not base.hidden:
1038 if base.vdiType == vhdutil.VDI_TYPE_RAW:
1039 self.lvmCache.setHidden(base.lvName)
1040 else:
1041 basePath = os.path.join(self.path, base.lvName)
1042 vhdutil.setHidden(basePath)
1043 if not base.lvReadonly:
1044 self.lvmCache.setReadonly(base.lvName, True)
1046 # NB: since this snapshot-preserving call is only invoked outside the
1047 # snapshot op context, we assume the LVM metadata on the involved slave
1048 # has by now been refreshed and do not attempt to do it here
1050 # Update the original record
1051 try:
1052 vdi_ref = self.session.xenapi.VDI.get_by_uuid(origUuid)
1053 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
1054 type = self.session.xenapi.VDI.get_type(vdi_ref)
1055 sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1056 sm_config['vhd-parent'] = baseUuid
1057 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config)
1058 except XenAPI.Failure:
1059 util.SMlog("ERROR updating the orig record")
1061 # introduce the new VDI records
1062 if clonUuid:
1063 try:
1064 clon_vdi = VDI.VDI(self, clonUuid)
1065 clon_vdi.read_only = False
1066 clon_vdi.location = clonUuid
1067 clon_vdi.utilisation = clon.sizeLV
1068 clon_vdi.sm_config = {
1069 "vdi_type": vhdutil.VDI_TYPE_VHD,
1070 "vhd-parent": baseUuid}
1072 if not self.legacyMode:
1073 LVMMetadataHandler(self.mdpath). \
1074 ensureSpaceIsAvailableForVdis(1)
1076 clon_vdi_ref = clon_vdi._db_introduce()
1077 util.SMlog("introduced clon VDI: %s (%s)" % \
1078 (clon_vdi_ref, clonUuid))
1080 vdi_info = {UUID_TAG: clonUuid,
1081 NAME_LABEL_TAG: clon_vdi.label,
1082 NAME_DESCRIPTION_TAG: clon_vdi.description,
1083 IS_A_SNAPSHOT_TAG: 0,
1084 SNAPSHOT_OF_TAG: '',
1085 SNAPSHOT_TIME_TAG: '',
1086 TYPE_TAG: type,
1087 VDI_TYPE_TAG: clon_vdi.sm_config['vdi_type'],
1088 READ_ONLY_TAG: int(clon_vdi.read_only),
1089 MANAGED_TAG: int(clon_vdi.managed),
1090 METADATA_OF_POOL_TAG: ''
1091 }
1093 if not self.legacyMode:
1094 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1096 except XenAPI.Failure:
1097 util.SMlog("ERROR introducing the clon record")
1099 try:
1100 base_vdi = VDI.VDI(self, baseUuid) # readonly parent
1101 base_vdi.label = "base copy"
1102 base_vdi.read_only = True
1103 base_vdi.location = baseUuid
1104 base_vdi.size = base.sizeVirt
1105 base_vdi.utilisation = base.sizeLV
1106 base_vdi.managed = False
1107 base_vdi.sm_config = {
1108 "vdi_type": vhdutil.VDI_TYPE_VHD,
1109 "vhd-parent": baseUuid}
1111 if not self.legacyMode:
1112 LVMMetadataHandler(self.mdpath).ensureSpaceIsAvailableForVdis(1)
1114 base_vdi_ref = base_vdi._db_introduce()
1115 util.SMlog("introduced base VDI: %s (%s)" % \
1116 (base_vdi_ref, baseUuid))
1118 vdi_info = {UUID_TAG: baseUuid,
1119 NAME_LABEL_TAG: base_vdi.label,
1120 NAME_DESCRIPTION_TAG: base_vdi.description,
1121 IS_A_SNAPSHOT_TAG: 0,
1122 SNAPSHOT_OF_TAG: '',
1123 SNAPSHOT_TIME_TAG: '',
1124 TYPE_TAG: type,
1125 VDI_TYPE_TAG: base_vdi.sm_config['vdi_type'],
1126 READ_ONLY_TAG: int(base_vdi.read_only),
1127 MANAGED_TAG: int(base_vdi.managed),
1128 METADATA_OF_POOL_TAG: ''
1129 }
1131 if not self.legacyMode:
1132 LVMMetadataHandler(self.mdpath).addVdi(vdi_info)
1133 except XenAPI.Failure:
1134 util.SMlog("ERROR introducing the base record")
1136 util.SMlog("*** INTERRUPTED CLONE OP: complete")
1138 def _undoAllJournals(self):
1139 """Undo all VHD & SM interrupted journaled operations. This call must
1140 be serialized with respect to all operations that create journals"""
1141 # undoing interrupted inflates must be done first, since undoing VHD
1142 # ops might require inflations
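# so the calls below run in this order: inflate journals, VHD journals,
# interrupted clone ops, then interrupted leaf-coalesce cleanup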
1143 self.lock.acquire()
1144 try:
1145 self._undoAllInflateJournals()
1146 self._undoAllVHDJournals()
1147 self._handleInterruptedCloneOps()
1148 self._handleInterruptedCoalesceLeaf()
1149 finally:
1150 self.lock.release()
1151 self.cleanup()
1153 def _undoAllInflateJournals(self):
1154 entries = self.journaler.getAll(lvhdutil.JRN_INFLATE)
1155 if len(entries) == 0:
1156 return
1157 self._loadvdis()
1158 for uuid, val in entries.items():
1159 vdi = self.vdis.get(uuid)
1160 if vdi:  # 1160 ↛ 1175: condition was never false
1161 util.SMlog("Found inflate journal %s, deflating %s to %s" % \
1162 (uuid, vdi.path, val))
1163 if vdi.readonly:  # 1163 ↛ 1164: condition was never true
1164 self.lvmCache.setReadonly(vdi.lvname, False)
1165 self.lvActivator.activate(uuid, vdi.lvname, False)
1166 currSizeLV = self.lvmCache.getSize(vdi.lvname)
1167 util.zeroOut(vdi.path, currSizeLV - vhdutil.VHD_FOOTER_SIZE,
1168 vhdutil.VHD_FOOTER_SIZE)
1169 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
1170 if vdi.readonly:  # 1170 ↛ 1171: condition was never true
1171 self.lvmCache.setReadonly(vdi.lvname, True)
1172 if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):  # 1172 ↛ 1173: condition was never true
1173 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1174 self.vgname, vdi.lvname, uuid)
1175 self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
1176 delattr(self, "vdiInfo")
1177 delattr(self, "allVDIs")
1179 def _undoAllVHDJournals(self):
1180 """check if there are VHD journals in existence and revert them"""
1181 journals = lvhdutil.getAllVHDJournals(self.lvmCache)
1182 if len(journals) == 0:  # 1182 ↛ 1184: condition was never false
1183 return
1184 self._loadvdis()
1185 for uuid, jlvName in journals:
1186 vdi = self.vdis[uuid]
1187 util.SMlog("Found VHD journal %s, reverting %s" % (uuid, vdi.path))
1188 self.lvActivator.activate(uuid, vdi.lvname, False)
1189 self.lvmCache.activateNoRefcount(jlvName)
1190 fullSize = lvhdutil.calcSizeVHDLV(vdi.size)
1191 lvhdutil.inflate(self.journaler, self.uuid, vdi.uuid, fullSize)
1192 try:
1193 jFile = os.path.join(self.path, jlvName)
1194 vhdutil.revert(vdi.path, jFile)
1195 except util.CommandException:
1196 util.logException("VHD journal revert")
1197 vhdutil.check(vdi.path)
1198 util.SMlog("VHD revert failed but VHD ok: removing journal")
1199 # Attempt to reclaim unused space
1200 vhdInfo = vhdutil.getVHDInfo(vdi.path, lvhdutil.extractUuid, False)
1201 NewSize = lvhdutil.calcSizeVHDLV(vhdInfo.sizeVirt)
1202 if NewSize < fullSize:
1203 lvhdutil.deflate(self.lvmCache, vdi.lvname, int(NewSize))
1204 lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
1205 self.vgname, vdi.lvname, uuid)
1206 self.lvmCache.remove(jlvName)
1207 delattr(self, "vdiInfo")
1208 delattr(self, "allVDIs")
1210 def _updateSlavesPreClone(self, hostRefs, origOldLV):
1211 masterRef = util.get_this_host_ref(self.session)
1212 args = {"vgName": self.vgname,
1213 "action1": "deactivateNoRefcount",
1214 "lvName1": origOldLV}
1215 for hostRef in hostRefs:
1216 if hostRef == masterRef:  # 1216 ↛ 1217: condition was never true
1217 continue
1218 util.SMlog("Deactivate VDI on %s" % hostRef)
1219 rv = self.session.xenapi.host.call_plugin(hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1220 util.SMlog("call-plugin returned: %s" % rv)
1221 if not rv:  # 1221 ↛ 1222: condition was never true
1222 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1224 def _updateSlavesOnClone(self, hostRefs, origOldLV, origLV,
1225 baseUuid, baseLV):
1226 """We need to reactivate the original LV on each slave (note that the
1227 name for the original LV might change), as well as init the refcount
1228 for the base LV"""
1229 args = {"vgName": self.vgname,
1230 "action1": "refresh",
1231 "lvName1": origLV,
1232 "action2": "activate",
1233 "ns2": lvhdutil.NS_PREFIX_LVM + self.uuid,
1234 "lvName2": baseLV,
1235 "uuid2": baseUuid}
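# The numbered keys form a small batch for the on-slave "multi" plugin call:
# action1 refreshes the renamed original LV, action2 activates the base LV under the
# LVM refcount namespace; the ordering is assumed from the key numbering.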
1237 masterRef = util.get_this_host_ref(self.session)
1238 for hostRef in hostRefs:
1239 if hostRef == masterRef:  # 1239 ↛ 1240: condition was never true
1240 continue
1241 util.SMlog("Updating %s, %s, %s on slave %s" % \
1242 (origOldLV, origLV, baseLV, hostRef))
1243 rv = self.session.xenapi.host.call_plugin(
1244 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1245 util.SMlog("call-plugin returned: %s" % rv)
1246 if not rv:  # 1246 ↛ 1247: condition was never true
1247 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1249 def _updateSlavesOnCBTClone(self, hostRefs, cbtlog):
1250 """Reactivate and refresh CBT log file on slaves"""
1251 args = {"vgName": self.vgname,
1252 "action1": "deactivateNoRefcount",
1253 "lvName1": cbtlog,
1254 "action2": "refresh",
1255 "lvName2": cbtlog}
1257 masterRef = util.get_this_host_ref(self.session)
1258 for hostRef in hostRefs:
1259 if hostRef == masterRef:  # 1259 ↛ 1260: condition was never true
1260 continue
1261 util.SMlog("Updating %s on slave %s" % (cbtlog, hostRef))
1262 rv = self.session.xenapi.host.call_plugin(
1263 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1264 util.SMlog("call-plugin returned: %s" % rv)
1265 if not rv:  # 1265 ↛ 1266: condition was never true
1266 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1268 def _updateSlavesOnRemove(self, hostRefs, baseUuid, baseLV):
1269 """Tell the slave we deleted the base image"""
1270 args = {"vgName": self.vgname,
1271 "action1": "cleanupLockAndRefcount",
1272 "uuid1": baseUuid,
1273 "ns1": lvhdutil.NS_PREFIX_LVM + self.uuid}
1275 masterRef = util.get_this_host_ref(self.session)
1276 for hostRef in hostRefs:
1277 if hostRef == masterRef:  # 1277 ↛ 1278: condition was never true
1278 continue
1279 util.SMlog("Cleaning locks for %s on slave %s" % (baseLV, hostRef))
1280 rv = self.session.xenapi.host.call_plugin(
1281 hostRef, self.PLUGIN_ON_SLAVE, "multi", args)
1282 util.SMlog("call-plugin returned: %s" % rv)
1283 if not rv:  # 1283 ↛ 1284: condition was never true
1284 raise Exception('plugin %s failed' % self.PLUGIN_ON_SLAVE)
1286 def _cleanup(self, skipLockCleanup=False):
1287 """delete stale refcounter, flag, and lock files"""
1288 RefCounter.resetAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1289 IPCFlag(self.uuid).clearAll()
1290 if not skipLockCleanup:  # 1290 ↛ 1291: condition was never true
1291 Lock.cleanupAll(self.uuid)
1292 Lock.cleanupAll(lvhdutil.NS_PREFIX_LVM + self.uuid)
1294 def _prepareTestMode(self):
1295 util.SMlog("Test mode: %s" % self.testMode)
1296 if self.ENV_VAR_VHD_TEST.get(self.testMode):  # 1296 ↛ 1297: condition was never true
1297 os.environ[self.ENV_VAR_VHD_TEST[self.testMode]] = "yes"
1298 util.SMlog("Setting env %s" % self.ENV_VAR_VHD_TEST[self.testMode])
1300 def _kickGC(self):
1301 # don't bother if an instance already running (this is just an
1302 # optimization to reduce the overhead of forking a new process if we
1303 # don't have to, but the process will check the lock anyways)
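# if another GC instance does hold the lock, we may still soft-abort it below when
# cleanup.should_preempt() says preempting the running coalesce would help our VDIs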
1304 lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
1305 if not lockRunning.acquireNoblock():  # 1305 ↛ 1306: condition was never true
1306 if cleanup.should_preempt(self.session, self.uuid):
1307 util.SMlog("Aborting currently-running coalesce of garbage VDI")
1308 try:
1309 if not cleanup.abort(self.uuid, soft=True):
1310 util.SMlog("The GC has already been scheduled to "
1311 "re-start")
1312 except util.CommandException as e:
1313 if e.code != errno.ETIMEDOUT:
1314 raise
1315 util.SMlog('failed to abort the GC')
1316 else:
1317 util.SMlog("A GC instance already running, not kicking")
1318 return
1319 else:
1320 lockRunning.release()
1322 util.SMlog("Kicking GC")
1323 cleanup.gc(self.session, self.uuid, True)
1325 def ensureCBTSpace(self):
1326 # Ensure we have space for at least one LV
1327 self._ensureSpaceAvailable(self.journaler.LV_SIZE)
1330class LVHDVDI(VDI.VDI):
1332 JRN_CLONE = "clone" # journal entry type for the clone operation
1334 def load(self, vdi_uuid):
1335 self.lock = self.sr.lock
1336 self.lvActivator = self.sr.lvActivator
1337 self.loaded = False
1338 self.vdi_type = vhdutil.VDI_TYPE_VHD
1339 if self.sr.legacyMode or util.fistpoint.is_active("xenrt_default_vdi_type_legacy"):  # 1339 ↛ 1341: condition was never false
1340 self.vdi_type = vhdutil.VDI_TYPE_RAW
1341 self.uuid = vdi_uuid
1342 self.location = self.uuid
1343 self.exists = True
1345 if hasattr(self.sr, "vdiInfo") and self.sr.vdiInfo.get(self.uuid):
1346 self._initFromVDIInfo(self.sr.vdiInfo[self.uuid])
1347 if self.parent:  # 1347 ↛ 1348: condition was never true
1348 self.sm_config_override['vhd-parent'] = self.parent
1349 else:
1350 self.sm_config_override['vhd-parent'] = None
1351 return
1353 # scan() didn't run: determine the type of the VDI manually
1354 if self._determineType():
1355 return
1357 # the VDI must be in the process of being created
1358 self.exists = False
1359 if "vdi_sm_config" in self.sr.srcmd.params and \  # 1359 ↛ 1361: condition was never true
1360 "type" in self.sr.srcmd.params["vdi_sm_config"]:
1361 type = self.sr.srcmd.params["vdi_sm_config"]["type"]
1362 if type == PARAM_RAW:
1363 self.vdi_type = vhdutil.VDI_TYPE_RAW
1364 elif type == PARAM_VHD:
1365 self.vdi_type = vhdutil.VDI_TYPE_VHD
1366 if self.sr.cmd == 'vdi_create' and self.sr.legacyMode:
1367 raise xs_errors.XenError('VDICreate', \
1368 opterr='Cannot create VHD type disk in legacy mode')
1369 else:
1370 raise xs_errors.XenError('VDICreate', opterr='bad type')
1371 self.lvname = "%s%s" % (lvhdutil.LV_PREFIX[self.vdi_type], vdi_uuid)
1372 self.path = os.path.join(self.sr.path, self.lvname)
1374 def create(self, sr_uuid, vdi_uuid, size):
1375 util.SMlog("LVHDVDI.create for %s" % self.uuid)
1376 if not self.sr.isMaster:
1377 raise xs_errors.XenError('LVMMaster')
1378 if self.exists:
1379 raise xs_errors.XenError('VDIExists')
1381 size = vhdutil.validate_and_round_vhd_size(int(size))
1383 util.SMlog("LVHDVDI.create: type = %s, %s (size=%s)" % \
1384 (self.vdi_type, self.path, size))
1385 lvSize = 0
1386 self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
1387 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1388 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT, int(size))
1389 else:
1390 if self.sr.provision == "thin":
1391 lvSize = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1392 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1393 elif self.sr.provision == "thick":
1394 lvSize = lvhdutil.calcSizeVHDLV(int(size))
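# Rough sizing intent (illustrative): raw VDIs round the requested size up to the LVM
# extent increment; thin VHDs start with only the empty-VHD metadata overhead; thick VHDs
# reserve the fully-inflated size via lvhdutil.calcSizeVHDLV()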
1396 self.sr._ensureSpaceAvailable(lvSize)
1398 try:
1399 self.sr.lvmCache.create(self.lvname, lvSize)
1400 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1401 self.size = self.sr.lvmCache.getSize(self.lvname)
1402 else:
1403 vhdutil.create(self.path, int(size), False, lvhdutil.MSIZE_MB)
1404 self.size = vhdutil.getSizeVirt(self.path)
1405 self.sr.lvmCache.deactivateNoRefcount(self.lvname)
1406 except util.CommandException as e:
1407 util.SMlog("Unable to create VDI")
1408 self.sr.lvmCache.remove(self.lvname)
1409 raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
1411 self.utilisation = lvSize
1412 self.sm_config["vdi_type"] = self.vdi_type
1414 if not self.sr.legacyMode:
1415 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1417 self.ref = self._db_introduce()
1418 self.sr._updateStats(self.sr.uuid, self.size)
1420 vdi_info = {UUID_TAG: self.uuid,
1421 NAME_LABEL_TAG: util.to_plain_string(self.label),
1422 NAME_DESCRIPTION_TAG: util.to_plain_string(self.description),
1423 IS_A_SNAPSHOT_TAG: 0,
1424 SNAPSHOT_OF_TAG: '',
1425 SNAPSHOT_TIME_TAG: '',
1426 TYPE_TAG: self.ty,
1427 VDI_TYPE_TAG: self.vdi_type,
1428 READ_ONLY_TAG: int(self.read_only),
1429 MANAGED_TAG: int(self.managed),
1430 METADATA_OF_POOL_TAG: ''
1431 }
1433 if not self.sr.legacyMode:
1434 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1436 return VDI.VDI.get_params(self)
1438 def delete(self, sr_uuid, vdi_uuid, data_only=False):
1439 util.SMlog("LVHDVDI.delete for %s" % self.uuid)
1440 try:
1441 self._loadThis()
1442 except SR.SRException as e:
1443 # Catch 'VDI doesn't exist' exception
1444 if e.errno == 46:
1445 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1446 raise
1448 vdi_ref = self.sr.srcmd.params['vdi_ref']
1449 if not self.session.xenapi.VDI.get_managed(vdi_ref):
1450 raise xs_errors.XenError("VDIDelete", \
1451 opterr="Deleting non-leaf node not permitted")
1453 if not self.hidden:
1454 self._markHidden()
1456 if not data_only:
1457 # Remove from XAPI and delete from MGT
1458 self._db_forget()
1459 else:
1460 # If this is a data_destroy call, don't remove from XAPI db
1461 # Only delete from MGT
1462 if not self.sr.legacyMode:
1463 LVMMetadataHandler(self.sr.mdpath).deleteVdiFromMetadata(self.uuid)
1465 # deactivate here because it might be too late to do it in the "final"
1466 # step: GC might have removed the LV by then
1467 if self.sr.lvActivator.get(self.uuid, False):
1468 self.sr.lvActivator.deactivate(self.uuid, False)
1470 try:
1471 self.sr.lvmCache.remove(self.lvname)
1472 self.sr.lock.cleanup(vdi_uuid, lvhdutil.NS_PREFIX_LVM + sr_uuid)
1473 self.sr.lock.cleanupAll(vdi_uuid)
1474 except SR.SRException as e:
1475 util.SMlog(
1476 "Failed to remove the volume (maybe is leaf coalescing) "
1477 "for %s err:%d" % (self.uuid, e.errno))
1479 self.sr._updateStats(self.sr.uuid, -self.size)
1480 self.sr._kickGC()
1481 return super(LVHDVDI, self).delete(sr_uuid, vdi_uuid, data_only)
1483 def attach(self, sr_uuid, vdi_uuid):
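"""Attach this VDI: refuse if an interrupted operation left journal entries
behind (the SR must be scanned first to trigger auto-repair), inflate a
deflated VHD LV when attaching writable, then perform the actual attach;
any LVs activated along the way are deactivated in the finally clause."""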
1484 util.SMlog("LVHDVDI.attach for %s" % self.uuid)
1485 if self.sr.journaler.hasJournals(self.uuid):
1486 raise xs_errors.XenError('VDIUnavailable',
1487 opterr='Interrupted operation detected on this VDI, '
1488 'scan SR first to trigger auto-repair')
1490 writable = ('args' not in self.sr.srcmd.params) or \
1491 (self.sr.srcmd.params['args'][0] == "true")
1492 needInflate = True
1493 if self.vdi_type == vhdutil.VDI_TYPE_RAW or not writable:
1494 needInflate = False
1495 else:
1496 self._loadThis()
1497 if self.utilisation >= lvhdutil.calcSizeVHDLV(self.size):
1498 needInflate = False
1500 if needInflate:
1501 try:
1502 self._prepareThin(True)
1503 except:
1504 util.logException("attach")
1505 raise xs_errors.XenError('LVMProvisionAttach')
1507 try:
1508 return self._attach()
1509 finally:
1510 if not self.sr.lvActivator.deactivateAll():
1511 util.SMlog("Failed to deactivate LVs back (%s)" % self.uuid)
1513 def detach(self, sr_uuid, vdi_uuid):
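"""Detach this VDI: deflate the VHD LV first where required (thin
provisioning, or snapshots, which are always kept deflated), then detach
and queue the LV chain for deactivation."""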
1514 util.SMlog("LVHDVDI.detach for %s" % self.uuid)
1515 self._loadThis()
1516 already_deflated = (self.utilisation < \
1517 lvhdutil.calcSizeVHDLV(self.size))
1518 needDeflate = True
1519 if self.vdi_type == vhdutil.VDI_TYPE_RAW or already_deflated:
1520 needDeflate = False
1521 elif self.sr.provision == "thick":
1522 needDeflate = False
1523 # except for snapshots, which are always deflated
1524 if self.sr.srcmd.cmd != 'vdi_detach_from_config':
1525 vdi_ref = self.sr.srcmd.params['vdi_ref']
1526 snap = self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref)
1527 if snap:
1528 needDeflate = True
1530 if needDeflate:
1531 try:
1532 self._prepareThin(False)
1533 except:
1534 util.logException("_prepareThin")
1535 raise xs_errors.XenError('VDIUnavailable', opterr='deflate')
1537 try:
1538 self._detach()
1539 finally:
1540 if not self.sr.lvActivator.deactivateAll():
1541 raise xs_errors.XenError("SMGeneral", opterr="deactivation")
1543 # We only support offline resize
1544 def resize(self, sr_uuid, vdi_uuid, size):
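"""Grow this VDI to the requested virtual size (shrinking is refused):
round the new size, inflate the LV as required by the provisioning mode,
resize the underlying LV or VHD, and push the new virtual size and
physical utilisation to XAPI."""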
1545 util.SMlog("LVHDVDI.resize for %s" % self.uuid)
1546 if not self.sr.isMaster:
1547 raise xs_errors.XenError('LVMMaster')
1549 self._loadThis()
1550 if self.hidden:
1551 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI')
1553 if size < self.size:
1554 util.SMlog('vdi_resize: shrinking not supported: ' + \
1555 '(current size: %d, new size: %d)' % (self.size, size))
1556 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed')
1558 size = vhdutil.validate_and_round_vhd_size(int(size))
1560 if size == self.size:
1561 return VDI.VDI.get_params(self)
1563 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1564 lvSizeOld = self.size
1565 lvSizeNew = util.roundup(lvutil.LVM_SIZE_INCREMENT, size)
1566 else:
1567 lvSizeOld = self.utilisation
1568 lvSizeNew = lvhdutil.calcSizeVHDLV(size)
1569 if self.sr.provision == "thin":
1570 # VDI is currently deflated, so keep it deflated
1571 lvSizeNew = lvSizeOld
1572 assert(lvSizeNew >= lvSizeOld)
1573 spaceNeeded = lvSizeNew - lvSizeOld
1574 self.sr._ensureSpaceAvailable(spaceNeeded)
1576 oldSize = self.size
1577 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
1578 self.sr.lvmCache.setSize(self.lvname, lvSizeNew)
1579 self.size = self.sr.lvmCache.getSize(self.lvname)
1580 self.utilisation = self.size
1581 else:
1582 if lvSizeNew != lvSizeOld:
1583 lvhdutil.inflate(self.sr.journaler, self.sr.uuid, self.uuid,
1584 lvSizeNew)
1585 vhdutil.setSizeVirtFast(self.path, size)
1586 self.size = vhdutil.getSizeVirt(self.path)
1587 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
1589 vdi_ref = self.sr.srcmd.params['vdi_ref']
1590 self.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
1591 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
1592 str(self.utilisation))
1593 self.sr._updateStats(self.sr.uuid, self.size - oldSize)
1594 super(LVHDVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size)
1595 return VDI.VDI.get_params(self)
1597 def clone(self, sr_uuid, vdi_uuid):
1598 return self._do_snapshot(
1599 sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE, cloneOp=True)
1601 def compose(self, sr_uuid, vdi1, vdi2):
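"""Compose this VDI onto vdi1 by making vdi1 its VHD parent: set the parent
locator, hide the new parent, mark the VDI referenced by the command's
first argument as unmanaged and tap-refresh this VDI so the new chain
takes effect. VHD VDIs only."""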
1602 util.SMlog("LVHDSR.compose for %s -> %s" % (vdi2, vdi1))
1603 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1604 raise xs_errors.XenError('Unimplemented')
1606 parent_uuid = vdi1
1607 parent_lvname = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + parent_uuid
1608 assert(self.sr.lvmCache.checkLV(parent_lvname))
1609 parent_path = os.path.join(self.sr.path, parent_lvname)
1611 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1612 self.sr.lvActivator.activate(parent_uuid, parent_lvname, False)
1614 vhdutil.setParent(self.path, parent_path, False)
1615 vhdutil.setHidden(parent_path)
1616 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False)
1618 if not blktap2.VDI.tap_refresh(self.session, self.sr.uuid, self.uuid,
1619 True):
1620 raise util.SMException("failed to refresh VDI %s" % self.uuid)
1622 util.SMlog("Compose done")
1624 def reset_leaf(self, sr_uuid, vdi_uuid):
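"""Discard the contents of this leaf VHD (vhdutil.killData) so that it
effectively resets to the state of its parent; refuses to run on a VDI
without a parent. VHD VDIs only."""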
1625 util.SMlog("LVHDSR.reset_leaf for %s" % vdi_uuid)
1626 if self.vdi_type != vhdutil.VDI_TYPE_VHD:
1627 raise xs_errors.XenError('Unimplemented')
1629 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
1631 # safety check
1632 if not vhdutil.hasParent(self.path):
1633 raise util.SMException(("ERROR: VDI %s has no parent, " + \
1634 "will not reset contents") % self.uuid)
1636 vhdutil.killData(self.path)
1638 def _attach(self):
1639 self._chainSetActive(True, True, True)
1640 if not util.pathexists(self.path):
1641 raise xs_errors.XenError('VDIUnavailable', \
1642 opterr='Could not find: %s' % self.path)
1644 if not hasattr(self, 'xenstore_data'):
1645 self.xenstore_data = {}
1647 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(self.uuid, \
1648 scsiutil.gen_synthetic_page_data(self.uuid)))
1650 self.xenstore_data['storage-type'] = 'lvm'
1651 self.xenstore_data['vdi-type'] = self.vdi_type
1653 self.attached = True
1654 self.sr.lvActivator.persist()
1655 return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
1657 def _detach(self):
1658 self._chainSetActive(False, True)
1659 self.attached = False
1661 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType,
1662 cloneOp=False, secondary=None, cbtlog=None):
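"""Wrapper around _snapshot: record the CBT consistency state if a CBT log
is present, pause the tapdisk for the duration of the snapshot, always
unpause afterwards (even on failure), and warn if the VM was paused for
longer than LONG_SNAPTIME seconds."""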
1663 # If cbt enabled, save file consistency state
1664 if cbtlog is not None:
1665 if blktap2.VDI.tap_status(self.session, vdi_uuid): 1665 ↛ 1666
1666 consistency_state = False
1667 else:
1668 consistency_state = True
1669 util.SMlog("Saving log consistency state of %s for vdi: %s" %
1670 (consistency_state, vdi_uuid))
1671 else:
1672 consistency_state = None
1674 pause_time = time.time()
1675 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid): 1675 ↛ 1676
1676 raise util.SMException("failed to pause VDI %s" % vdi_uuid)
1678 snapResult = None
1679 try:
1680 snapResult = self._snapshot(snapType, cloneOp, cbtlog, consistency_state)
1681 except Exception as e1:
1682 try:
1683 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid,
1684 secondary=None)
1685 except Exception as e2:
1686 util.SMlog('WARNING: failed to clean up failed snapshot: '
1687 '%s (error ignored)' % e2)
1688 raise
1689 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
1690 unpause_time = time.time()
1691 if (unpause_time - pause_time) > LONG_SNAPTIME: 1691 ↛ 1692
1692 util.SMlog('WARNING: snapshot paused VM for %s seconds' %
1693 (unpause_time - pause_time))
1694 return snapResult
1696 def _snapshot(self, snapType, cloneOp=False, cbtlog=None, cbt_consistency=None):
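"""Core snapshot/clone logic (master only, non-legacy SRs): activate the
VHD chain, check the chain depth, reserve space (including two journal
LVs), rename this VDI's LV into a hidden, read-only "base copy", deflate
it, and create one or two snapshot children from it, updating attached
slaves and CBT metadata along the way. The whole operation runs under a
clone journal so that an interrupted snapshot can be repaired later."""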
1697 util.SMlog("LVHDVDI._snapshot for %s (type %s)" % (self.uuid, snapType))
1699 if not self.sr.isMaster: 1699 ↛ 1700
1700 raise xs_errors.XenError('LVMMaster')
1701 if self.sr.legacyMode: 1701 ↛ 1702
1702 raise xs_errors.XenError('Unimplemented', opterr='In legacy mode')
1704 self._loadThis()
1705 if self.hidden: 1705 ↛ 1706
1706 raise xs_errors.XenError('VDISnapshot', opterr='hidden VDI')
1708 self.sm_config = self.session.xenapi.VDI.get_sm_config( \
1709 self.sr.srcmd.params['vdi_ref'])
1710 if "type" in self.sm_config and self.sm_config['type'] == 'raw': 1710 ↛ 1711line 1710 didn't jump to line 1711, because the condition on line 1710 was never true
1711 if not util.fistpoint.is_active("testsm_clone_allow_raw"):
1712 raise xs_errors.XenError('Unimplemented', \
1713 opterr='Raw VDI, snapshot or clone not permitted')
1715 # we must activate the entire VHD chain because the real parent could
1716 # theoretically be anywhere in the chain if all VHDs under it are empty
1717 self._chainSetActive(True, False)
1718 if not util.pathexists(self.path): 1718 ↛ 1719
1719 raise xs_errors.XenError('VDIUnavailable', \
1720 opterr='VDI unavailable: %s' % (self.path))
1722 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1722 ↛ 1730
1723 depth = vhdutil.getDepth(self.path)
1724 if depth == -1: 1724 ↛ 1725
1725 raise xs_errors.XenError('VDIUnavailable', \
1726 opterr='failed to get VHD depth')
1727 elif depth >= vhdutil.MAX_CHAIN_SIZE: 1727 ↛ 1728
1728 raise xs_errors.XenError('SnapshotChainTooLong')
1730 self.issnap = self.session.xenapi.VDI.get_is_a_snapshot( \
1731 self.sr.srcmd.params['vdi_ref'])
1733 fullpr = lvhdutil.calcSizeVHDLV(self.size)
1734 thinpr = util.roundup(lvutil.LVM_SIZE_INCREMENT, \
1735 vhdutil.calcOverheadEmpty(lvhdutil.MSIZE))
1736 lvSizeOrig = thinpr
1737 lvSizeClon = thinpr
1739 hostRefs = []
1740 if self.sr.cmd == "vdi_snapshot":
1741 hostRefs = util.get_hosts_attached_on(self.session, [self.uuid])
1742 if hostRefs: 1742 ↛ 1744
1743 lvSizeOrig = fullpr
1744 if self.sr.provision == "thick": 1744 ↛ 1750
1745 if not self.issnap: 1745 ↛ 1746
1746 lvSizeOrig = fullpr
1747 if self.sr.cmd != "vdi_snapshot":
1748 lvSizeClon = fullpr
1750 if (snapType == VDI.SNAPSHOT_SINGLE or 1750 ↛ 1752
1751 snapType == VDI.SNAPSHOT_INTERNAL):
1752 lvSizeClon = 0
1754 # the space required must include 2 journal LVs: a clone journal and an
1755 # inflate journal (for failure handling)
1756 size_req = lvSizeOrig + lvSizeClon + 2 * self.sr.journaler.LV_SIZE
1757 lvSizeBase = self.size
1758 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1758 ↛ 1762
1759 lvSizeBase = util.roundup(lvutil.LVM_SIZE_INCREMENT,
1760 vhdutil.getSizePhys(self.path))
1761 size_req -= (self.utilisation - lvSizeBase)
1762 self.sr._ensureSpaceAvailable(size_req)
1764 if hostRefs:
1765 self.sr._updateSlavesPreClone(hostRefs, self.lvname)
1767 baseUuid = util.gen_uuid()
1768 origUuid = self.uuid
1769 clonUuid = ""
1770 if snapType == VDI.SNAPSHOT_DOUBLE: 1770 ↛ 1772
1771 clonUuid = util.gen_uuid()
1772 jval = "%s_%s" % (baseUuid, clonUuid)
1773 self.sr.journaler.create(self.JRN_CLONE, origUuid, jval)
1774 util.fistpoint.activate("LVHDRT_clone_vdi_after_create_journal", self.sr.uuid)
1776 try:
1777 # self becomes the "base vdi"
1778 origOldLV = self.lvname
1779 baseLV = lvhdutil.LV_PREFIX[self.vdi_type] + baseUuid
1780 self.sr.lvmCache.rename(self.lvname, baseLV)
1781 self.sr.lvActivator.replace(self.uuid, baseUuid, baseLV, False)
1782 RefCounter.set(baseUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1783 self.uuid = baseUuid
1784 self.lvname = baseLV
1785 self.path = os.path.join(self.sr.path, baseLV)
1786 self.label = "base copy"
1787 self.read_only = True
1788 self.location = self.uuid
1789 self.managed = False
1791 # shrink the base copy to the minimum - we do it before creating
1792 # the snapshot volumes to avoid requiring double the space
1793 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 1793 ↛ 1796
1794 lvhdutil.deflate(self.sr.lvmCache, self.lvname, lvSizeBase)
1795 self.utilisation = lvSizeBase
1796 util.fistpoint.activate("LVHDRT_clone_vdi_after_shrink_parent", self.sr.uuid)
1798 snapVDI = self._createSnap(origUuid, lvSizeOrig, False)
1799 util.fistpoint.activate("LVHDRT_clone_vdi_after_first_snap", self.sr.uuid)
1800 snapVDI2 = None
1801 if snapType == VDI.SNAPSHOT_DOUBLE: 1801 ↛ 1807
1802 snapVDI2 = self._createSnap(clonUuid, lvSizeClon, True)
1803 # If we have CBT enabled on the VDI,
1804 # set CBT status for the new snapshot disk
1805 if cbtlog:
1806 snapVDI2.cbt_enabled = True
1807 util.fistpoint.activate("LVHDRT_clone_vdi_after_second_snap", self.sr.uuid)
1809 # note: it is important to mark the parent hidden only AFTER the
1810 # new VHD children have been created, which are referencing it;
1811 # otherwise we would introduce a race with GC that could reclaim
1812 # the parent before we snapshot it
1813 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 1813 ↛ 1814
1814 self.sr.lvmCache.setHidden(self.lvname)
1815 else:
1816 vhdutil.setHidden(self.path)
1817 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_hidden", self.sr.uuid)
1819 # set the base copy to ReadOnly
1820 self.sr.lvmCache.setReadonly(self.lvname, True)
1821 util.fistpoint.activate("LVHDRT_clone_vdi_after_parent_ro", self.sr.uuid)
1823 if hostRefs:
1824 self.sr._updateSlavesOnClone(hostRefs, origOldLV,
1825 snapVDI.lvname, self.uuid, self.lvname)
1827 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE)
1828 if snapType == VDI.SNAPSHOT_DOUBLE and cbtlog:
1829 snapVDI._cbt_snapshot(clonUuid, cbt_consistency)
1830 if hostRefs: 1830 ↛ 1844
1831 cbtlog_file = self._get_cbt_logname(snapVDI.uuid)
1832 try:
1833 self.sr._updateSlavesOnCBTClone(hostRefs, cbtlog_file)
1834 except:
1835 alert_name = "VDI_CBT_SNAPSHOT_FAILED"
1836 alert_str = ("Creating CBT snapshot for {} failed"
1837 .format(snapVDI.uuid))
1838 snapVDI._disable_cbt_on_error(alert_name, alert_str)
1839 pass
1841 except (util.SMException, XenAPI.Failure) as e:
1842 util.logException("LVHDVDI._snapshot")
1843 self._failClone(origUuid, jval, str(e))
1844 util.fistpoint.activate("LVHDRT_clone_vdi_before_remove_journal", self.sr.uuid)
1846 self.sr.journaler.remove(self.JRN_CLONE, origUuid)
1848 return self._finishSnapshot(snapVDI, snapVDI2, hostRefs, cloneOp, snapType)
1850 def _createSnap(self, snapUuid, snapSizeLV, isNew):
1851 """Snapshot self and return the snapshot VDI object"""
1852 snapLV = lvhdutil.LV_PREFIX[vhdutil.VDI_TYPE_VHD] + snapUuid
1853 snapPath = os.path.join(self.sr.path, snapLV)
1854 self.sr.lvmCache.create(snapLV, int(snapSizeLV))
1855 util.fistpoint.activate("LVHDRT_clone_vdi_after_lvcreate", self.sr.uuid)
1856 if isNew:
1857 RefCounter.set(snapUuid, 1, 0, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1858 self.sr.lvActivator.add(snapUuid, snapLV, False)
1859 parentRaw = (self.vdi_type == vhdutil.VDI_TYPE_RAW)
1860 vhdutil.snapshot(snapPath, self.path, parentRaw, lvhdutil.MSIZE_MB)
1861 snapParent = vhdutil.getParent(snapPath, lvhdutil.extractUuid)
1863 snapVDI = LVHDVDI(self.sr, snapUuid)
1864 snapVDI.read_only = False
1865 snapVDI.location = snapUuid
1866 snapVDI.size = self.size
1867 snapVDI.utilisation = snapSizeLV
1868 snapVDI.sm_config = dict()
1869 for key, val in self.sm_config.items(): 1869 ↛ 1870
1870 if key not in [
1871 "type", "vdi_type", "vhd-parent", "paused", "relinking", "activating"] and \
1872 not key.startswith("host_"):
1873 snapVDI.sm_config[key] = val
1874 snapVDI.sm_config["vdi_type"] = vhdutil.VDI_TYPE_VHD
1875 snapVDI.sm_config["vhd-parent"] = snapParent
1876 snapVDI.lvname = snapLV
1877 return snapVDI
1879 def _finishSnapshot(self, snapVDI, snapVDI2, hostRefs, cloneOp=False, snapType=None):
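"""Finalise the snapshot: update SR stats, delete the base copy if neither
child references it (otherwise transfer the binary refcount to it), keep
the "paused"/"host_*" sm-config keys on the leaf, introduce the new VDI
records (and the base copy, if retained) into XAPI and the MGT metadata,
update the original record, and return the parameters of the VDI to hand
back to the caller."""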
1880 if snapType is not VDI.SNAPSHOT_INTERNAL: 1880 ↛ 1882
1881 self.sr._updateStats(self.sr.uuid, self.size)
1882 basePresent = True
1884 # Verify parent locator field of both children and delete basePath if
1885 # unused
1886 snapParent = snapVDI.sm_config["vhd-parent"]
1887 snap2Parent = ""
1888 if snapVDI2: 1888 ↛ 1890
1889 snap2Parent = snapVDI2.sm_config["vhd-parent"]
1890 if snapParent != self.uuid and \ 1890 ↛ 1917
1891 (not snapVDI2 or snap2Parent != self.uuid):
1892 util.SMlog("%s != %s != %s => deleting unused base %s" % \
1893 (snapParent, self.uuid, snap2Parent, self.lvname))
1894 RefCounter.put(self.uuid, False, lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
1895 self.sr.lvmCache.remove(self.lvname)
1896 self.sr.lvActivator.remove(self.uuid, False)
1897 if hostRefs:
1898 self.sr._updateSlavesOnRemove(hostRefs, self.uuid, self.lvname)
1899 basePresent = False
1900 else:
1901 # assign the _binary_ refcount of the original VDI to the new base
1902 # VDI (but as the normal refcount, since binary refcounts are only
1903 # for leaf nodes). The normal refcount of the child is not
1904 # transferred to the base VDI because normal refcounts are
1905 # incremented and decremented individually, and not based on the
1906 # VHD chain (i.e., the child's normal refcount will be decremented
1907 # independently of its parent situation). Add 1 for this clone op.
1908 # Note that we do not need to protect the refcount operations
1909 # below with per-VDI locking like we do in lvutil because at this
1910 # point we have exclusive access to the VDIs involved. Other SM
1911 # operations are serialized by the Agent or with the SR lock, and
1912 # any coalesce activations are serialized with the SR lock. (The
1913 # coalesce activates the coalesced VDI pair in the beginning, which
1914 # cannot affect the VDIs here because they cannot possibly be
1915 # involved in coalescing at this point, and at the relinkSkip step
1916 # that activates the children, which takes the SR lock.)
1917 ns = lvhdutil.NS_PREFIX_LVM + self.sr.uuid
1918 (cnt, bcnt) = RefCounter.check(snapVDI.uuid, ns)
1919 RefCounter.set(self.uuid, bcnt + 1, 0, ns)
1921 # the "paused" and "host_*" sm-config keys are special and must stay on
1922 # the leaf without being inherited by anyone else
1923 for key in [x for x in self.sm_config.keys() if x == "paused" or x.startswith("host_")]: 1923 ↛ 1924
1924 snapVDI.sm_config[key] = self.sm_config[key]
1925 del self.sm_config[key]
1927 # Introduce any new VDI records & update the existing one
1928 type = self.session.xenapi.VDI.get_type( \
1929 self.sr.srcmd.params['vdi_ref'])
1930 if snapVDI2: 1930 ↛ 1972
1931 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1932 vdiRef = snapVDI2._db_introduce()
1933 if cloneOp:
1934 vdi_info = {UUID_TAG: snapVDI2.uuid,
1935 NAME_LABEL_TAG: util.to_plain_string( \
1936 self.session.xenapi.VDI.get_name_label( \
1937 self.sr.srcmd.params['vdi_ref'])),
1938 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1939 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1940 IS_A_SNAPSHOT_TAG: 0,
1941 SNAPSHOT_OF_TAG: '',
1942 SNAPSHOT_TIME_TAG: '',
1943 TYPE_TAG: type,
1944 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1945 READ_ONLY_TAG: 0,
1946 MANAGED_TAG: int(snapVDI2.managed),
1947 METADATA_OF_POOL_TAG: ''
1948 }
1949 else:
1950 util.SMlog("snapshot VDI params: %s" % \
1951 self.session.xenapi.VDI.get_snapshot_time(vdiRef))
1952 vdi_info = {UUID_TAG: snapVDI2.uuid,
1953 NAME_LABEL_TAG: util.to_plain_string( \
1954 self.session.xenapi.VDI.get_name_label( \
1955 self.sr.srcmd.params['vdi_ref'])),
1956 NAME_DESCRIPTION_TAG: util.to_plain_string( \
1957 self.session.xenapi.VDI.get_name_description(self.sr.srcmd.params['vdi_ref'])),
1958 IS_A_SNAPSHOT_TAG: 1,
1959 SNAPSHOT_OF_TAG: snapVDI.uuid,
1960 SNAPSHOT_TIME_TAG: '',
1961 TYPE_TAG: type,
1962 VDI_TYPE_TAG: snapVDI2.sm_config['vdi_type'],
1963 READ_ONLY_TAG: 0,
1964 MANAGED_TAG: int(snapVDI2.managed),
1965 METADATA_OF_POOL_TAG: ''
1966 }
1968 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1969 util.SMlog("vdi_clone: introduced 2nd snap VDI: %s (%s)" % \
1970 (vdiRef, snapVDI2.uuid))
1972 if basePresent: 1972 ↛ 1973
1973 LVMMetadataHandler(self.sr.mdpath).ensureSpaceIsAvailableForVdis(1)
1974 vdiRef = self._db_introduce()
1975 vdi_info = {UUID_TAG: self.uuid,
1976 NAME_LABEL_TAG: self.label,
1977 NAME_DESCRIPTION_TAG: self.description,
1978 IS_A_SNAPSHOT_TAG: 0,
1979 SNAPSHOT_OF_TAG: '',
1980 SNAPSHOT_TIME_TAG: '',
1981 TYPE_TAG: type,
1982 VDI_TYPE_TAG: self.sm_config['vdi_type'],
1983 READ_ONLY_TAG: 1,
1984 MANAGED_TAG: 0,
1985 METADATA_OF_POOL_TAG: ''
1986 }
1988 LVMMetadataHandler(self.sr.mdpath).addVdi(vdi_info)
1989 util.SMlog("vdi_clone: introduced base VDI: %s (%s)" % \
1990 (vdiRef, self.uuid))
1992 # Update the original record
1993 vdi_ref = self.sr.srcmd.params['vdi_ref']
1994 self.session.xenapi.VDI.set_sm_config(vdi_ref, snapVDI.sm_config)
1995 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref, \
1996 str(snapVDI.utilisation))
1998 # Return the info on the new snap VDI
1999 snap = snapVDI2
2000 if not snap: 2000 ↛ 2001
2001 snap = self
2002 if not basePresent:
2003 # a single-snapshot of an empty VDI will be a noop, resulting
2004 # in no new VDIs, so return the existing one. The GC wouldn't
2005 # normally try to single-snapshot an empty VHD of course, but
2006 # if an external snapshot operation manages to sneak in right
2007 # before a snapshot-coalesce phase, we would get here
2008 snap = snapVDI
2009 return snap.get_params()
2011 def _initFromVDIInfo(self, vdiInfo):
2012 self.vdi_type = vdiInfo.vdiType
2013 self.lvname = vdiInfo.lvName
2014 self.size = vdiInfo.sizeVirt
2015 self.utilisation = vdiInfo.sizeLV
2016 self.hidden = vdiInfo.hidden
2017 if self.hidden: 2017 ↛ 2018
2018 self.managed = False
2019 self.active = vdiInfo.lvActive
2020 self.readonly = vdiInfo.lvReadonly
2021 self.parent = vdiInfo.parentUuid
2022 self.path = os.path.join(self.sr.path, self.lvname)
2023 if hasattr(self, "sm_config_override"): 2023 ↛ 2026
2024 self.sm_config_override["vdi_type"] = self.vdi_type
2025 else:
2026 self.sm_config_override = {'vdi_type': self.vdi_type}
2027 self.loaded = True
2029 def _initFromLVInfo(self, lvInfo):
2030 self.vdi_type = lvInfo.vdiType
2031 self.lvname = lvInfo.name
2032 self.size = lvInfo.size
2033 self.utilisation = lvInfo.size
2034 self.hidden = lvInfo.hidden
2035 self.active = lvInfo.active
2036 self.readonly = lvInfo.readonly
2037 self.parent = ''
2038 self.path = os.path.join(self.sr.path, self.lvname)
2039 if hasattr(self, "sm_config_override"): 2039 ↛ 2042
2040 self.sm_config_override["vdi_type"] = self.vdi_type
2041 else:
2042 self.sm_config_override = {'vdi_type': self.vdi_type}
2043 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 2043 ↛ 2044
2044 self.loaded = True
2046 def _initFromVHDInfo(self, vhdInfo):
2047 self.size = vhdInfo.sizeVirt
2048 self.parent = vhdInfo.parentUuid
2049 self.hidden = vhdInfo.hidden
2050 self.loaded = True
2052 def _determineType(self):
2053 """Determine whether this is a raw or a VHD VDI"""
2054 if "vdi_ref" in self.sr.srcmd.params: 2054 ↛ 2067line 2054 didn't jump to line 2067, because the condition on line 2054 was never false
2055 vdi_ref = self.sr.srcmd.params["vdi_ref"]
2056 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
2057 if sm_config.get("vdi_type"): 2057 ↛ 2058
2058 self.vdi_type = sm_config["vdi_type"]
2059 prefix = lvhdutil.LV_PREFIX[self.vdi_type]
2060 self.lvname = "%s%s" % (prefix, self.uuid)
2061 self.path = os.path.join(self.sr.path, self.lvname)
2062 self.sm_config_override = sm_config
2063 return True
2065 # LVM commands can be costly, so check the file directly first in case
2066 # the LV is active
2067 found = False
2068 for t in lvhdutil.VDI_TYPES: 2068 ↛ 2069
2069 lvname = "%s%s" % (lvhdutil.LV_PREFIX[t], self.uuid)
2070 path = os.path.join(self.sr.path, lvname)
2071 if util.pathexists(path):
2072 if found:
2073 raise xs_errors.XenError('VDILoad',
2074 opterr="multiple VDI's: uuid %s" % self.uuid)
2075 found = True
2076 self.vdi_type = t
2077 self.lvname = lvname
2078 self.path = path
2079 if found: 2079 ↛ 2080
2080 return True
2082 # now list all LV's
2083 if not lvutil._checkVG(self.sr.vgname): 2083 ↛ 2085
2084 # when doing attach_from_config, the VG won't be there yet
2085 return False
2087 lvs = lvhdutil.getLVInfo(self.sr.lvmCache)
2088 if lvs.get(self.uuid):
2089 self._initFromLVInfo(lvs[self.uuid])
2090 return True
2091 return False
2093 def _loadThis(self):
2094 """Load VDI info for this VDI and activate the LV if it's VHD. We
2095 don't do it in VDI.load() because not all VDI operations need it."""
2096 if self.loaded: 2096 ↛ 2097
2097 if self.vdi_type == vhdutil.VDI_TYPE_VHD:
2098 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2099 return
2100 try:
2101 lvs = lvhdutil.getLVInfo(self.sr.lvmCache, self.lvname)
2102 except util.CommandException as e:
2103 raise xs_errors.XenError('VDIUnavailable',
2104 opterr='%s (LV scan error)' % os.strerror(abs(e.code)))
2105 if not lvs.get(self.uuid): 2105 ↛ 2106
2106 raise xs_errors.XenError('VDIUnavailable', opterr='LV not found')
2107 self._initFromLVInfo(lvs[self.uuid])
2108 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2108 ↛ 2115
2109 self.sr.lvActivator.activate(self.uuid, self.lvname, False)
2110 vhdInfo = vhdutil.getVHDInfo(self.path, lvhdutil.extractUuid, False)
2111 if not vhdInfo: 2111 ↛ 2112
2112 raise xs_errors.XenError('VDIUnavailable', \
2113 opterr='getVHDInfo failed')
2114 self._initFromVHDInfo(vhdInfo)
2115 self.loaded = True
2117 def _chainSetActive(self, active, binary, persistent=False):
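"""Activate or deactivate this VDI's entire VHD chain. The binary refcount
applies only to the leaf; on deactivation the LVs are merely queued so
that they are deactivated in the final cleanup step."""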
2118 if binary: 2118 ↛ 2119
2119 (count, bcount) = RefCounter.checkLocked(self.uuid,
2120 lvhdutil.NS_PREFIX_LVM + self.sr.uuid)
2121 if (active and bcount > 0) or (not active and bcount == 0):
2122 return # this is a redundant activation/deactivation call
2124 vdiList = {self.uuid: self.lvname}
2125 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 2125 ↛ 2128
2126 vdiList = vhdutil.getParentChain(self.lvname,
2127 lvhdutil.extractUuid, self.sr.vgname)
2128 for uuid, lvName in vdiList.items(): 2128 ↛ 2129
2129 binaryParam = binary
2130 if uuid != self.uuid:
2131 binaryParam = False # binary param only applies to leaf nodes
2132 if active:
2133 self.sr.lvActivator.activate(uuid, lvName, binaryParam,
2134 persistent)
2135 else:
2136 # just add the LVs for deactivation in the final (cleanup)
2137 # step. The LVs must not have been activated during the current
2138 # operation
2139 self.sr.lvActivator.add(uuid, lvName, binaryParam)
2141 def _failClone(self, uuid, jval, msg):
2142 try:
2143 self.sr._handleInterruptedCloneOp(uuid, jval, True)
2144 self.sr.journaler.remove(self.JRN_CLONE, uuid)
2145 except Exception as e:
2146 util.SMlog('WARNING: failed to clean up failed snapshot: ' \
2147 '%s (error ignored)' % e)
2148 raise xs_errors.XenError('VDIClone', opterr=msg)
2150 def _markHidden(self):
2151 if self.vdi_type == vhdutil.VDI_TYPE_RAW:
2152 self.sr.lvmCache.setHidden(self.lvname)
2153 else:
2154 vhdutil.setHidden(self.path)
2155 self.hidden = 1
2157 def _prepareThin(self, attach):
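"""Inflate (attach=True) or deflate (attach=False) a thinly provisioned LV.
The master does this locally; a slave delegates to the lvhd-thin plugin
on the master and then refreshes the LV to pick up the size change.
Finally the new utilisation figures are pushed to XAPI."""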
2158 origUtilisation = self.sr.lvmCache.getSize(self.lvname)
2159 if self.sr.isMaster:
2160 # the master can prepare the VDI locally
2161 if attach:
2162 lvhdutil.attachThin(self.sr.journaler, self.sr.uuid, self.uuid)
2163 else:
2164 lvhdutil.detachThin(self.session, self.sr.lvmCache,
2165 self.sr.uuid, self.uuid)
2166 else:
2167 fn = "attach"
2168 if not attach:
2169 fn = "detach"
2170 pools = self.session.xenapi.pool.get_all()
2171 master = self.session.xenapi.pool.get_master(pools[0])
2172 rv = self.session.xenapi.host.call_plugin(
2173 master, self.sr.THIN_PLUGIN, fn,
2174 {"srUuid": self.sr.uuid, "vdiUuid": self.uuid})
2175 util.SMlog("call-plugin returned: %s" % rv)
2176 if not rv:
2177 raise Exception('plugin %s failed' % self.sr.THIN_PLUGIN)
2178 # refresh to pick up the size change on this slave
2179 self.sr.lvmCache.activateNoRefcount(self.lvname, True)
2181 self.utilisation = self.sr.lvmCache.getSize(self.lvname)
2182 if origUtilisation != self.utilisation:
2183 vdi_ref = self.sr.srcmd.params['vdi_ref']
2184 self.session.xenapi.VDI.set_physical_utilisation(vdi_ref,
2185 str(self.utilisation))
2186 stats = lvutil._getVGstats(self.sr.vgname)
2187 sr_utilisation = stats['physical_utilisation']
2188 self.session.xenapi.SR.set_physical_utilisation(self.sr.sr_ref,
2189 str(sr_utilisation))
2191 def update(self, sr_uuid, vdi_uuid):
2192 if self.sr.legacyMode:
2193 return
2195 # Sync the name_label of this VDI on storage with the name_label in XAPI
2196 vdi_ref = self.session.xenapi.VDI.get_by_uuid(self.uuid)
2197 update_map = {}
2198 update_map[METADATA_UPDATE_OBJECT_TYPE_TAG] = \
2199 METADATA_OBJECT_TYPE_VDI
2200 update_map[UUID_TAG] = self.uuid
2201 update_map[NAME_LABEL_TAG] = util.to_plain_string( \
2202 self.session.xenapi.VDI.get_name_label(vdi_ref))
2203 update_map[NAME_DESCRIPTION_TAG] = util.to_plain_string( \
2204 self.session.xenapi.VDI.get_name_description(vdi_ref))
2205 update_map[SNAPSHOT_TIME_TAG] = \
2206 self.session.xenapi.VDI.get_snapshot_time(vdi_ref)
2207 update_map[METADATA_OF_POOL_TAG] = \
2208 self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
2209 LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
2211 def _ensure_cbt_space(self):
2212 self.sr.ensureCBTSpace()
2214 def _create_cbt_log(self):
2215 logname = self._get_cbt_logname(self.uuid)
2216 self.sr.lvmCache.create(logname, self.sr.journaler.LV_SIZE, CBTLOG_TAG)
2217 logpath = super(LVHDVDI, self)._create_cbt_log()
2218 self.sr.lvmCache.deactivateNoRefcount(logname)
2219 return logpath
2221 def _delete_cbt_log(self):
2222 logpath = self._get_cbt_logpath(self.uuid)
2223 if self._cbt_log_exists(logpath):
2224 logname = self._get_cbt_logname(self.uuid)
2225 self.sr.lvmCache.remove(logname)
2227 def _rename(self, oldpath, newpath):
2228 oldname = os.path.basename(oldpath)
2229 newname = os.path.basename(newpath)
2230 self.sr.lvmCache.rename(oldname, newname)
2232 def _activate_cbt_log(self, lv_name):
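"""Activate the CBT log LV if it is not already active. Returns True only
if this call performed the activation, so that the caller knows whether
it needs to deactivate the LV again afterwards."""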
2233 self.sr.lvmCache.refresh()
2234 if not self.sr.lvmCache.is_active(lv_name): 2234 ↛ 2235
2235 try:
2236 self.sr.lvmCache.activateNoRefcount(lv_name)
2237 return True
2238 except Exception as e:
2239 util.SMlog("Exception in _activate_cbt_log, "
2240 "Error: %s." % str(e))
2241 raise
2242 else:
2243 return False
2245 def _deactivate_cbt_log(self, lv_name):
2246 try:
2247 self.sr.lvmCache.deactivateNoRefcount(lv_name)
2248 except Exception as e:
2249 util.SMlog("Exception in _deactivate_cbt_log, Error: %s." % str(e))
2250 raise
2252 def _cbt_log_exists(self, logpath):
2253 return lvutil.exists(logpath)
2255if __name__ == '__main__': 2255 ↛ 2256
2256 SRCommand.run(LVHDSR, DRIVER_INFO)
2257else:
2258 SR.registerSR(LVHDSR)