1#!/usr/bin/python3 

2# 

3# Copyright (C) Citrix Systems Inc. 

4# 

5# This program is free software; you can redistribute it and/or modify 

6# it under the terms of the GNU Lesser General Public License as published 

7# by the Free Software Foundation; version 2.1 only. 

8# 

9# This program is distributed in the hope that it will be useful, 

10# but WITHOUT ANY WARRANTY; without even the implied warranty of 

11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

12# GNU Lesser General Public License for more details. 

13# 

14# You should have received a copy of the GNU Lesser General Public License 

15# along with this program; if not, write to the Free Software Foundation, Inc., 

16# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 

17# 

18# FileSR: local-file storage repository 

19 

20from sm_typing import Dict, Optional, List, override 

21 

22import SR 

23import VDI 

24import SRCommand 

25import util 

26import scsiutil 

27import vhdutil 

28import os 

29import errno 

30import xs_errors 

31import cleanup 

32import blktap2 

33import time 

34import glob 

35from uuid import uuid4 

36from lock import Lock 

37import xmlrpc.client 

38import XenAPI # pylint: disable=import-error 

39from constants import CBTLOG_TAG 

40 

41geneology: Dict[str, List[str]] = {} 
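# Maps a parent VDI uuid to the uuids of its children; populated while the
# VHD chains are scanned in FileSR._loadvdis().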

42CAPABILITIES = ["SR_PROBE", "SR_UPDATE", \ 

43 "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", \ 

44 "VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "VDI_MIRROR", 

45 "VDI_GENERATE_CONFIG", "ATOMIC_PAUSE", "VDI_CONFIG_CBT", 

46 "VDI_ACTIVATE", "VDI_DEACTIVATE", "THIN_PROVISIONING"] 

47 

48CONFIGURATION = [['location', 'local directory path (required)']] 
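# Illustrative sketch of how the single 'location' key above is typically
# supplied when creating a file SR (the path and name-label are placeholders;
# exact arguments depend on the host/pool):
#
#   xe sr-create type=file name-label="Local VHD SR" \
#       device-config:location=/exports/local-vhd-sr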

49 

50DRIVER_INFO = { 

51 'name': 'Local Path VHD', 

52 'description': 'SR plugin which represents disks as VHD files stored on a local path', 

53 'vendor': 'Citrix Systems Inc', 

54 'copyright': '(C) 2008 Citrix Systems Inc', 

55 'driver_version': '1.0', 

56 'required_api_version': '1.0', 

57 'capabilities': CAPABILITIES, 

58 'configuration': CONFIGURATION 

59 } 

60 

61JOURNAL_FILE_PREFIX = ".journal-" 

62 

63OPS_EXCLUSIVE = [ 

64 "sr_create", "sr_delete", "sr_probe", "sr_attach", "sr_detach", 

65 "sr_scan", "vdi_init", "vdi_create", "vdi_delete", "vdi_attach", 

66 "vdi_detach", "vdi_resize_online", "vdi_snapshot", "vdi_clone"] 

67 

68DRIVER_CONFIG = {"ATTACH_FROM_CONFIG_WITH_TAPDISK": True} 

69 

70 

71class FileSR(SR.SR): 

72 """Local file storage repository""" 

73 

74 SR_TYPE = "file" 

75 

76 @override 

77 @staticmethod 

78 def handles(srtype) -> bool: 

79 return srtype == 'file' 

80 

81 def _check_o_direct(self): 

82 if self.sr_ref and self.session is not None: 

83 other_config = self.session.xenapi.SR.get_other_config(self.sr_ref) 

84 o_direct = other_config.get("o_direct") 

85 self.o_direct = o_direct is not None and o_direct == "true" 

86 else: 

87 self.o_direct = True 

88 

89 def __init__(self, srcmd, sr_uuid): 

90 # We call SR.SR.__init__ explicitly because 

91 # "super" sometimes failed due to circular imports 

92 SR.SR.__init__(self, srcmd, sr_uuid) 

93 self._check_o_direct() 

94 

95 @override 

96 def load(self, sr_uuid) -> None: 

97 self.ops_exclusive = OPS_EXCLUSIVE 

98 self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid) 

99 self.sr_vditype = vhdutil.VDI_TYPE_VHD 

100 if 'location' not in self.dconf or not self.dconf['location']:

101 raise xs_errors.XenError('ConfigLocationMissing') 

102 self.remotepath = self.dconf['location'] 

103 self.path = os.path.join(SR.MOUNT_BASE, sr_uuid) 

104 self.linkpath = self.path 

105 self.mountpoint = self.path 

106 self.attached = False 

107 self.driver_config = DRIVER_CONFIG 

108 

109 @override 

110 def create(self, sr_uuid, size) -> None: 

111 """ Create the SR. The path must not already exist, or if it does,  

112 it must be empty. (This accounts for the case where the user has 

113 mounted a device onto a directory manually and wants to use this as the

114 root of a file-based SR.) """ 

115 try: 

116 if util.ioretry(lambda: util.pathexists(self.remotepath)):

117 if len(util.ioretry(lambda: util.listdir(self.remotepath))) != 0: 

118 raise xs_errors.XenError('SRExists') 

119 else: 

120 try: 

121 util.ioretry(lambda: os.mkdir(self.remotepath)) 

122 except util.CommandException as inst: 

123 if inst.code == errno.EEXIST: 

124 raise xs_errors.XenError('SRExists') 

125 else: 

126 raise xs_errors.XenError('FileSRCreate', \ 

127 opterr='directory creation failure %d' \ 

128 % inst.code) 

129 except: 

130 raise xs_errors.XenError('FileSRCreate') 

131 

132 @override 

133 def delete(self, sr_uuid) -> None: 

134 self.attach(sr_uuid) 

135 cleanup.gc_force(self.session, self.uuid) 

136 

137 # check to make sure no VDIs are present; then remove any old

138 # non-VDI files

139 try: 

140 if util.ioretry(lambda: util.pathexists(self.path)): 

141 #Load the VDI list 

142 self._loadvdis() 

143 for uuid in self.vdis: 

144 if not self.vdis[uuid].deleted: 

145 raise xs_errors.XenError('SRNotEmpty', \ 

146 opterr='VDIs still exist in SR') 

147 

148 # remove everything else; there are no VDIs

149 for name in util.ioretry(lambda: util.listdir(self.path)): 

150 fullpath = os.path.join(self.path, name) 

151 try: 

152 util.ioretry(lambda: os.unlink(fullpath)) 

153 except util.CommandException as inst: 

154 if inst.code != errno.ENOENT and \ 

155 inst.code != errno.EISDIR: 

156 raise xs_errors.XenError('FileSRDelete', \ 

157 opterr='failed to remove %s error %d' \ 

158 % (fullpath, inst.code)) 

159 self.detach(sr_uuid) 

160 except util.CommandException as inst: 

161 self.detach(sr_uuid) 

162 raise xs_errors.XenError('FileSRDelete', \ 

163 opterr='error %d' % inst.code) 

164 

165 @override 

166 def attach(self, sr_uuid) -> None: 

167 self.attach_and_bind(sr_uuid) 

168 

169 def attach_and_bind(self, sr_uuid, bind=True) -> None: 

170 if not self._checkmount(): 

171 try: 

172 util.ioretry(lambda: util.makedirs(self.path, mode=0o700)) 

173 except util.CommandException as inst: 

174 if inst.code != errno.EEXIST: 

175 raise xs_errors.XenError("FileSRCreate", \ 

176 opterr='fail to create mount point. Errno is %s' % inst.code) 

177 try: 

178 cmd = ["mount", self.remotepath, self.path] 

179 if bind: 

180 cmd.append("--bind") 

181 util.pread(cmd) 

182 os.chmod(self.path, mode=0o0700) 

183 except util.CommandException as inst: 

184 raise xs_errors.XenError('FileSRCreate', \ 

185 opterr='fail to mount FileSR. Errno is %s' % inst.code) 

186 self.attached = True 

187 

188 @override 

189 def detach(self, sr_uuid) -> None: 

190 if self._checkmount(): 

191 try: 

192 util.SMlog("Aborting GC/coalesce") 

193 cleanup.abort(self.uuid) 

194 os.chdir(SR.MOUNT_BASE) 

195 util.pread(["umount", self.path]) 

196 os.rmdir(self.path) 

197 except Exception as e: 

198 raise xs_errors.XenError('SRInUse', opterr=str(e)) 

199 self.attached = False 

200 

201 @override 

202 def scan(self, sr_uuid) -> None: 

203 if not self._checkmount(): 

204 raise xs_errors.XenError('SRUnavailable', \ 

205 opterr='no such directory %s' % self.path) 

206 

207 if not self.vdis:

208 self._loadvdis() 

209 

210 if not self.passthrough: 

211 self.physical_size = self._getsize() 

212 self.physical_utilisation = self._getutilisation() 

213 

214 for uuid in list(self.vdis.keys()): 

215 if self.vdis[uuid].deleted:

216 del self.vdis[uuid] 

217 

218 # CA-15607: make sure we are robust to the directory being unmounted beneath 

219 # us (eg by a confused user). Without this we might forget all our VDI references 

220 # which would be a shame. 

221 # For SMB SRs, this path is the mountpoint

222 mount_path = self.path 

223 if self.handles("smb"):

224 mount_path = self.mountpoint 

225 

226 if not self.handles("file") and not os.path.ismount(mount_path):

227 util.SMlog("Error: FileSR.scan called but directory %s isn't a mountpoint" % mount_path) 

228 raise xs_errors.XenError('SRUnavailable', \ 

229 opterr='not mounted %s' % mount_path) 

230 

231 self._kickGC() 

232 

233 # default behaviour from here on 

234 super(FileSR, self).scan(sr_uuid) 

235 

236 @override 

237 def update(self, sr_uuid) -> None: 

238 if not self._checkmount(): 

239 raise xs_errors.XenError('SRUnavailable', \ 

240 opterr='no such directory %s' % self.path) 

241 self._update(sr_uuid, 0) 

242 

243 def _update(self, sr_uuid, virt_alloc_delta): 

244 valloc = int(self.session.xenapi.SR.get_virtual_allocation(self.sr_ref)) 

245 self.virtual_allocation = valloc + virt_alloc_delta 

246 self.physical_size = self._getsize() 

247 self.physical_utilisation = self._getutilisation() 

248 self._db_update() 

249 

250 @override 

251 def content_type(self, sr_uuid) -> str: 

252 return super(FileSR, self).content_type(sr_uuid) 

253 

254 @override 

255 def vdi(self, uuid) -> VDI.VDI: 

256 return FileVDI(self, uuid) 

257 

258 def added_vdi(self, vdi): 

259 self.vdis[vdi.uuid] = vdi 

260 

261 def deleted_vdi(self, uuid): 

262 if uuid in self.vdis: 

263 del self.vdis[uuid] 

264 

265 @override 

266 def replay(self, uuid) -> None: 

267 try: 

268 file = open(self.path + "/filelog.txt", "r") 

269 data = file.readlines() 

270 file.close() 

271 self._process_replay(data) 

272 except: 

273 raise xs_errors.XenError('SRLog') 

274 

275 def _loadvdis(self): 

276 if self.vdis:

277 return 

278 

279 pattern = os.path.join(self.path, "*%s" % vhdutil.FILE_EXTN_VHD) 

280 try: 

281 self.vhds = vhdutil.getAllVHDs(pattern, FileVDI.extractUuid) 

282 except util.CommandException as inst: 

283 raise xs_errors.XenError('SRScan', opterr="error VHD-scanning " \ 

284 "path %s (%s)" % (self.path, inst)) 

285 try: 

286 list_vhds = [FileVDI.extractUuid(v) for v in util.ioretry(lambda: glob.glob(pattern))] 

287 if len(self.vhds) != len(list_vhds):

288 util.SMlog("VHD scan returns %d VHDs: %s" % (len(self.vhds), sorted(self.vhds))) 

289 util.SMlog("VHD list returns %d VHDs: %s" % (len(list_vhds), sorted(list_vhds))) 

290 except: 

291 pass 

292 for uuid in self.vhds.keys(): 

293 if self.vhds[uuid].error:

294 raise xs_errors.XenError('SRScan', opterr='uuid=%s' % uuid) 

295 self.vdis[uuid] = self.vdi(uuid) 

296 # Get the key hash of any encrypted VDIs: 

297 vhd_path = os.path.join(self.path, self.vhds[uuid].path) 

298 key_hash = vhdutil.getKeyHash(vhd_path) 

299 self.vdis[uuid].sm_config_override['key_hash'] = key_hash 

300 

301 # raw VDIs and CBT log files 

302 files = util.ioretry(lambda: util.listdir(self.path))

303 for fn in files:

304 if fn.endswith(vhdutil.FILE_EXTN_RAW): 

305 uuid = fn[:-(len(vhdutil.FILE_EXTN_RAW))] 

306 self.vdis[uuid] = self.vdi(uuid) 

307 elif fn.endswith(CBTLOG_TAG): 

308 cbt_uuid = fn.split(".")[0] 

309 # If an associated disk exists, update CBT status 

310 # else create new VDI of type cbt_metadata 

311 if cbt_uuid in self.vdis: 

312 self.vdis[cbt_uuid].cbt_enabled = True 

313 else: 

314 new_vdi = self.vdi(cbt_uuid) 

315 new_vdi.ty = "cbt_metadata" 

316 new_vdi.cbt_enabled = True 

317 self.vdis[cbt_uuid] = new_vdi 

318 

319 # Mark parent VDIs as Read-only and generate virtual allocation 

320 self.virtual_allocation = 0 

321 for uuid, vdi in self.vdis.items(): 

322 if vdi.parent:

323 if vdi.parent in self.vdis: 

324 self.vdis[vdi.parent].read_only = True 

325 if vdi.parent in geneology: 

326 geneology[vdi.parent].append(uuid) 

327 else: 

328 geneology[vdi.parent] = [uuid] 

329 if not vdi.hidden:

330 self.virtual_allocation += (vdi.size) 

331 

332 # now remove all hidden leaf nodes from self.vdis so that they are not 

333 # introduced into the Agent DB when SR is synchronized. With the 

334 # asynchronous GC, a deleted VDI might stay around until the next 

335 # SR.scan, so if we don't ignore hidden leaves we would pick up 

336 # freshly-deleted VDIs as newly-added VDIs 

337 for uuid in list(self.vdis.keys()): 

338 if uuid not in geneology and self.vdis[uuid].hidden:

339 util.SMlog("Scan found hidden leaf (%s), ignoring" % uuid) 

340 del self.vdis[uuid] 

341 

342 def _getsize(self): 

343 path = self.path 

344 if self.handles("smb"):

345 path = self.linkpath 

346 return util.get_fs_size(path) 

347 

348 def _getutilisation(self): 

349 return util.get_fs_utilisation(self.path) 

350 

351 def _replay(self, logentry): 

352 # all replay commands have the same arguments at positions 5, 6 and 7:

353 # vdi_command, sr-uuid, vdi-uuid 

354 back_cmd = logentry[5].replace("vdi_", "") 

355 target = self.vdi(logentry[7]) 

356 cmd = getattr(target, back_cmd) 

357 args = [] 

358 for item in logentry[6:]: 

359 item = item.replace("\n", "") 

360 args.append(item) 

361 ret = cmd(*args)

362 if ret: 

363 print(ret) 

364 

365 def _compare_args(self, a, b): 

366 try: 

367 if a[2] != "log:": 

368 return 1 

369 if b[2] != "end:" and b[2] != "error:": 

370 return 1 

371 if a[3] != b[3]: 

372 return 1 

373 if a[4] != b[4]: 

374 return 1 

375 return 0 

376 except: 

377 return 1 

378 

379 def _process_replay(self, data): 

380 logentries = [] 

381 for logentry in data: 

382 logentry = logentry.split(" ") 

383 logentries.append(logentry) 

384 # we are looking for a log entry that has a log but no end or error 

385 # wkcfix -- recreate (adjusted) logfile 

386 index = 0 

387 while index < len(logentries) - 1: 

388 if self._compare_args(logentries[index], logentries[index + 1]): 

389 self._replay(logentries[index]) 

390 else: 

391 # skip the paired one 

392 index += 1 

393 # next 

394 index += 1 

395 

396 def _kickGC(self): 

397 util.SMlog("Kicking GC") 

398 cleanup.start_gc_service(self.uuid) 

399 

400 def _isbind(self): 

401 # os.path.ismount can't deal with bind mount 

402 st1 = os.stat(self.path) 

403 st2 = os.stat(self.remotepath) 

404 return st1.st_dev == st2.st_dev and st1.st_ino == st2.st_ino 

405 

406 def _checkmount(self) -> bool: 

407 mount_path = self.path 

408 if self.handles("smb"):

409 mount_path = self.mountpoint 

410 

411 return util.ioretry(lambda: util.pathexists(mount_path) and \ 

412 (util.ismount(mount_path) or \ 

413 util.pathexists(self.remotepath) and self._isbind())) 

414 

415 # Override in SharedFileSR. 

416 def _check_hardlinks(self) -> bool: 

417 return True 

418 

419class FileVDI(VDI.VDI): 

420 PARAM_VHD = "vhd" 

421 PARAM_RAW = "raw" 

422 VDI_TYPE = { 

423 PARAM_VHD: vhdutil.VDI_TYPE_VHD, 

424 PARAM_RAW: vhdutil.VDI_TYPE_RAW 

425 } 

426 

427 def _find_path_with_retries(self, vdi_uuid, maxretry=5, period=2.0): 

428 vhd_path = os.path.join(self.sr.path, "%s.%s" % \ 

429 (vdi_uuid, self.PARAM_VHD)) 

430 raw_path = os.path.join(self.sr.path, "%s.%s" % \ 

431 (vdi_uuid, self.PARAM_RAW)) 

432 cbt_path = os.path.join(self.sr.path, "%s.%s" % 

433 (vdi_uuid, CBTLOG_TAG)) 

434 found = False 

435 tries = 0 

436 while tries < maxretry and not found: 

437 tries += 1 

438 if util.ioretry(lambda: util.pathexists(vhd_path)): 

439 self.vdi_type = vhdutil.VDI_TYPE_VHD 

440 self.path = vhd_path 

441 found = True 

442 elif util.ioretry(lambda: util.pathexists(raw_path)): 

443 self.vdi_type = vhdutil.VDI_TYPE_RAW 

444 self.path = raw_path 

445 self.hidden = False 

446 found = True 

447 elif util.ioretry(lambda: util.pathexists(cbt_path)):

448 self.vdi_type = CBTLOG_TAG 

449 self.path = cbt_path 

450 self.hidden = False 

451 found = True 

452 

453 if not found: 

454 util.SMlog("VHD %s not found, retry %s of %s" % (vhd_path, tries, maxretry)) 

455 time.sleep(period) 

456 

457 return found 

458 

459 @override 

460 def load(self, vdi_uuid) -> None: 

461 self.lock = self.sr.lock 

462 

463 self.sr.srcmd.params['o_direct'] = self.sr.o_direct 

464 

465 if self.sr.srcmd.cmd == "vdi_create": 

466 self.vdi_type = vhdutil.VDI_TYPE_VHD 

467 self.key_hash = None 

468 if "vdi_sm_config" in self.sr.srcmd.params: 468 ↛ 469line 468 didn't jump to line 469, because the condition on line 468 was never true

469 if "key_hash" in self.sr.srcmd.params["vdi_sm_config"]: 

470 self.key_hash = self.sr.srcmd.params["vdi_sm_config"]["key_hash"] 

471 

472 if "type" in self.sr.srcmd.params["vdi_sm_config"]: 

473 vdi_type = self.sr.srcmd.params["vdi_sm_config"]["type"] 

474 if not self.VDI_TYPE.get(vdi_type): 

475 raise xs_errors.XenError('VDIType', 

476 opterr='Invalid VDI type %s' % vdi_type) 

477 self.vdi_type = self.VDI_TYPE[vdi_type] 

478 self.path = os.path.join(self.sr.path, "%s%s" % 

479 (vdi_uuid, vhdutil.FILE_EXTN[self.vdi_type])) 

480 else: 

481 found = self._find_path_with_retries(vdi_uuid) 

482 if not found:

483 if self.sr.srcmd.cmd == "vdi_delete": 

484 # Could be delete for CBT log file 

485 self.path = os.path.join(self.sr.path, "%s.%s" % 

486 (vdi_uuid, self.PARAM_VHD)) 

487 return 

488 if self.sr.srcmd.cmd == "vdi_attach_from_config": 

489 return 

490 raise xs_errors.XenError('VDIUnavailable', 

491 opterr="VDI %s not found" % vdi_uuid) 

492 

493 

494 if self.vdi_type == vhdutil.VDI_TYPE_VHD and \ 

495 self.sr.__dict__.get("vhds") and self.sr.vhds.get(vdi_uuid): 

496 # VHD info already preloaded: use it instead of querying directly 

497 vhdInfo = self.sr.vhds[vdi_uuid] 

498 self.utilisation = vhdInfo.sizePhys 

499 self.size = vhdInfo.sizeVirt 

500 self.hidden = vhdInfo.hidden 

501 if self.hidden:

502 self.managed = False 

503 self.parent = vhdInfo.parentUuid 

504 if self.parent:

505 self.sm_config_override = {'vhd-parent': self.parent} 

506 else: 

507 self.sm_config_override = {'vhd-parent': None} 

508 return 

509 

510 try: 

511 # Change to the SR directory in case parent 

512 # locator field path has changed 

513 os.chdir(self.sr.path) 

514 except Exception as chdir_exception: 

515 util.SMlog("Unable to change to SR directory, SR unavailable, %s" % 

516 str(chdir_exception)) 

517 raise xs_errors.XenError('SRUnavailable', opterr=str(chdir_exception)) 

518 

519 if util.ioretry(

520 lambda: util.pathexists(self.path), 

521 errlist=[errno.EIO, errno.ENOENT]): 

522 try: 

523 st = util.ioretry(lambda: os.stat(self.path), 

524 errlist=[errno.EIO, errno.ENOENT]) 

525 self.utilisation = int(st.st_size) 

526 except util.CommandException as inst: 

527 if inst.code == errno.EIO: 

528 raise xs_errors.XenError('VDILoad', \ 

529 opterr='Failed load VDI information %s' % self.path) 

530 else: 

531 util.SMlog("Stat failed for %s, %s" % ( 

532 self.path, str(inst))) 

533 raise xs_errors.XenError('VDIType', \ 

534 opterr='Invalid VDI type %s' % self.vdi_type) 

535 

536 if self.vdi_type == vhdutil.VDI_TYPE_RAW:

537 self.exists = True 

538 self.size = self.utilisation 

539 self.sm_config_override = {'type': self.PARAM_RAW} 

540 return 

541 

542 if self.vdi_type == CBTLOG_TAG:

543 self.exists = True 

544 self.size = self.utilisation 

545 return 

546 

547 try: 

548 # The VDI might be activated in R/W mode so the VHD footer 

549 # won't be valid, use the back-up one instead. 

550 diskinfo = util.ioretry( 

551 lambda: self._query_info(self.path, True), 

552 errlist=[errno.EIO, errno.ENOENT]) 

553 

554 if 'parent' in diskinfo:

555 self.parent = diskinfo['parent'] 

556 self.sm_config_override = {'vhd-parent': self.parent} 

557 else: 

558 self.sm_config_override = {'vhd-parent': None} 

559 self.parent = '' 

560 self.size = int(diskinfo['size']) * 1024 * 1024 

561 self.hidden = int(diskinfo['hidden']) 

562 if self.hidden:

563 self.managed = False 

564 self.exists = True 

565 except util.CommandException as inst: 

566 raise xs_errors.XenError('VDILoad', \ 

567 opterr='Failed load VDI information %s' % self.path) 

568 

569 @override 

570 def update(self, sr_uuid, vdi_location) -> None: 

571 self.load(vdi_location) 

572 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

573 self.sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

574 self._db_update() 

575 

576 @override 

577 def create(self, sr_uuid, vdi_uuid, size) -> str: 

578 if util.ioretry(lambda: util.pathexists(self.path)):

579 raise xs_errors.XenError('VDIExists') 

580 

581 if self.vdi_type == vhdutil.VDI_TYPE_VHD: 

582 try: 

583 size = vhdutil.validate_and_round_vhd_size(int(size)) 

584 mb = 1024 * 1024 

585 size_mb = size // mb 

586 util.ioretry(lambda: self._create(str(size_mb), self.path)) 

587 self.size = util.ioretry(lambda: self._query_v(self.path)) 

588 except util.CommandException as inst: 

589 raise xs_errors.XenError('VDICreate', 

590 opterr='error %d' % inst.code) 

591 else: 

592 f = open(self.path, 'w') 

593 f.truncate(int(size)) 

594 f.close() 

595 self.size = size 

596 

597 self.sr.added_vdi(self) 

598 

599 st = util.ioretry(lambda: os.stat(self.path)) 

600 self.utilisation = int(st.st_size) 

601 if self.vdi_type == vhdutil.VDI_TYPE_RAW: 

602 self.sm_config = {"type": self.PARAM_RAW} 

603 

604 self._db_introduce() 

605 self.sr._update(self.sr.uuid, self.size) 

606 return super(FileVDI, self).get_params() 

607 

608 @override 

609 def delete(self, sr_uuid, vdi_uuid, data_only=False) -> None: 

610 if not util.ioretry(lambda: util.pathexists(self.path)): 

611 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

612 

613 if self.attached: 

614 raise xs_errors.XenError('VDIInUse') 

615 

616 try: 

617 util.force_unlink(self.path) 

618 except Exception as e: 

619 raise xs_errors.XenError( 

620 'VDIDelete', 

621 opterr='Failed to unlink file during deleting VDI: %s' % str(e)) 

622 

623 self.sr.deleted_vdi(vdi_uuid) 

624 # If this is a data_destroy call, don't remove from XAPI db 

625 if not data_only: 

626 self._db_forget() 

627 self.sr._update(self.sr.uuid, -self.size) 

628 self.sr.lock.cleanupAll(vdi_uuid) 

629 self.sr._kickGC() 

630 return super(FileVDI, self).delete(sr_uuid, vdi_uuid, data_only) 

631 

632 @override 

633 def attach(self, sr_uuid, vdi_uuid) -> str: 

634 if self.path is None: 

635 self._find_path_with_retries(vdi_uuid) 

636 if not self._checkpath(self.path): 

637 raise xs_errors.XenError('VDIUnavailable', \ 

638 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

639 try: 

640 self.attached = True 

641 

642 if not hasattr(self, 'xenstore_data'): 

643 self.xenstore_data = {} 

644 

645 self.xenstore_data.update(scsiutil.update_XS_SCSIdata(vdi_uuid, \ 

646 scsiutil.gen_synthetic_page_data(vdi_uuid))) 

647 

648 if self.sr.handles("file"): 

649 # XXX: PR-1255: if these are constants then they should 

650 # be returned by the attach API call, not persisted in the 

651 # pool database. 

652 self.xenstore_data['storage-type'] = 'ext' 

653 return super(FileVDI, self).attach(sr_uuid, vdi_uuid) 

654 except util.CommandException as inst: 

655 raise xs_errors.XenError('VDILoad', opterr='error %d' % inst.code) 

656 

657 @override 

658 def detach(self, sr_uuid, vdi_uuid) -> None: 

659 self.attached = False 

660 

661 @override 

662 def resize(self, sr_uuid, vdi_uuid, size) -> str: 

663 if not self.exists: 

664 raise xs_errors.XenError('VDIUnavailable', \ 

665 opterr='VDI %s unavailable %s' % (vdi_uuid, self.path)) 

666 

667 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

668 raise xs_errors.XenError('Unimplemented') 

669 

670 if self.hidden: 

671 raise xs_errors.XenError('VDIUnavailable', opterr='hidden VDI') 

672 

673 if size < self.size: 

674 util.SMlog('vdi_resize: shrinking not supported: ' + \ 

675 '(current size: %d, new size: %d)' % (self.size, size)) 

676 raise xs_errors.XenError('VDISize', opterr='shrinking not allowed') 

677 

678 if size == self.size: 

679 return VDI.VDI.get_params(self) 

680 

681 # We already checked it is a VDI_TYPE_VHD 

682 size = vhdutil.validate_and_round_vhd_size(int(size)) 

683 

684 jFile = JOURNAL_FILE_PREFIX + self.uuid 

685 try: 

686 vhdutil.setSizeVirt(self.path, size, jFile) 

687 except: 

688 # Revert the operation 

689 vhdutil.revert(self.path, jFile) 

690 raise xs_errors.XenError('VDISize', opterr='resize operation failed') 

691 

692 old_size = self.size 

693 self.size = vhdutil.getSizeVirt(self.path) 

694 st = util.ioretry(lambda: os.stat(self.path)) 

695 self.utilisation = int(st.st_size) 

696 

697 self._db_update() 

698 self.sr._update(self.sr.uuid, self.size - old_size) 

699 super(FileVDI, self).resize_cbt(self.sr.uuid, self.uuid, self.size) 

700 return VDI.VDI.get_params(self) 

701 

702 @override 

703 def clone(self, sr_uuid, vdi_uuid) -> str: 

704 return self._do_snapshot(sr_uuid, vdi_uuid, VDI.SNAPSHOT_DOUBLE) 

705 

706 @override 

707 def compose(self, sr_uuid, vdi1, vdi2) -> None: 

708 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

709 raise xs_errors.XenError('Unimplemented') 

710 parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD] 

711 parent_path = os.path.join(self.sr.path, parent_fn) 

712 assert(util.pathexists(parent_path)) 

713 vhdutil.setParent(self.path, parent_path, False) 

714 vhdutil.setHidden(parent_path) 

715 self.sr.session.xenapi.VDI.set_managed(self.sr.srcmd.params['args'][0], False) 

716 util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path, 

717 "-n", self.path]) 

718 # Tell tapdisk the chain has changed 

719 if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2): 

720 raise util.SMException("failed to refresh VDI %s" % self.uuid) 

721 util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1)) 

722 

723 def reset_leaf(self, sr_uuid, vdi_uuid): 

724 if self.vdi_type != vhdutil.VDI_TYPE_VHD: 

725 raise xs_errors.XenError('Unimplemented') 

726 

727 # safety check 

728 if not vhdutil.hasParent(self.path): 

729 raise util.SMException("ERROR: VDI %s has no parent, "

730 "will not reset contents" % self.uuid)

731 

732 vhdutil.killData(self.path) 

733 

734 @override 

735 def _do_snapshot(self, sr_uuid, vdi_uuid, snapType, 

736 cloneOp=False, secondary=None, cbtlog=None) -> str: 

737 # If cbt enabled, save file consistency state 

738 if cbtlog is not None:

739 if blktap2.VDI.tap_status(self.session, vdi_uuid): 

740 consistency_state = False 

741 else: 

742 consistency_state = True 

743 util.SMlog("Saving log consistency state of %s for vdi: %s" % 

744 (consistency_state, vdi_uuid)) 

745 else: 

746 consistency_state = None 

747 

748 if self.vdi_type != vhdutil.VDI_TYPE_VHD:

749 raise xs_errors.XenError('Unimplemented') 

750 

751 if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):

752 raise util.SMException("failed to pause VDI %s" % vdi_uuid) 

753 try: 

754 return self._snapshot(snapType, cbtlog, consistency_state) 

755 finally: 

756 blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary) 

757 

758 @override 

759 def _rename(self, src, dst) -> None: 

760 util.SMlog("FileVDI._rename %s to %s" % (src, dst)) 

761 util.ioretry(lambda: os.rename(src, dst)) 

762 

763 def _link(self, src, dst): 

764 util.SMlog("FileVDI._link %s to %s" % (src, dst)) 

765 os.link(src, dst) 

766 

767 def _unlink(self, path): 

768 util.SMlog("FileVDI._unlink %s" % (path)) 

769 os.unlink(path) 

770 

771 def _create_new_parent(self, src, newsrc): 

772 if self.sr._check_hardlinks(): 

773 self._link(src, newsrc) 

774 else: 

775 self._rename(src, newsrc) 

776 

777 def __fist_enospace(self): 

778 raise util.CommandException(28, "vhd-util snapshot", reason="No space") 

779 

780 def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None): 

781 util.SMlog("FileVDI._snapshot for %s (type %s)" % (self.uuid, snap_type)) 

782 

783 args = [] 

784 args.append("vdi_clone") 

785 args.append(self.sr.uuid) 

786 args.append(self.uuid) 

787 

788 dest = None 

789 dst = None 

790 if snap_type == VDI.SNAPSHOT_DOUBLE:

791 dest = util.gen_uuid() 

792 dst = os.path.join(self.sr.path, "%s.%s" % (dest, self.vdi_type)) 

793 args.append(dest) 

794 

795 if self.hidden:

796 raise xs_errors.XenError('VDIClone', opterr='hidden VDI') 

797 

798 depth = vhdutil.getDepth(self.path) 

799 if depth == -1:

800 raise xs_errors.XenError('VDIUnavailable', \ 

801 opterr='failed to get VHD depth') 

802 elif depth >= vhdutil.MAX_CHAIN_SIZE:

803 raise xs_errors.XenError('SnapshotChainTooLong') 

804 

805 newuuid = util.gen_uuid() 

806 src = self.path 

807 newsrc = os.path.join(self.sr.path, "%s.%s" % (newuuid, self.vdi_type)) 

808 newsrcname = "%s.%s" % (newuuid, self.vdi_type) 

809 

810 if not self._checkpath(src):

811 raise xs_errors.XenError('VDIUnavailable', \ 

812 opterr='VDI %s unavailable %s' % (self.uuid, src)) 

813 

814 # wkcfix: multiphase 

815 util.start_log_entry(self.sr.path, self.path, args) 

816 

817 # We assume the filehandle has been released 

818 try: 

819 self._create_new_parent(src, newsrc) 

820 

821 # Create the snapshot under a temporary name, then rename 

822 # it afterwards. This avoids a small window where it exists 

823 # but is invalid. We do not need to do this for 

824 # snap_type == VDI.SNAPSHOT_DOUBLE because dst never existed 

825 # before so nobody will try to query it. 

826 tmpsrc = "%s.%s" % (src, "new") 

827 # Fault injection site to fail the snapshot with ENOSPACE 

828 util.fistpoint.activate_custom_fn( 

829 "FileSR_fail_snap1", 

830 self.__fist_enospace) 

831 util.ioretry(lambda: self._snap(tmpsrc, newsrcname)) 

832 self._rename(tmpsrc, src) 

833 if snap_type == VDI.SNAPSHOT_DOUBLE:

834 # Fault injection site to fail the snapshot with ENOSPACE 

835 util.fistpoint.activate_custom_fn( 

836 "FileSR_fail_snap2", 

837 self.__fist_enospace) 

838 util.ioretry(lambda: self._snap(dst, newsrcname)) 

839 # mark the original file (in this case, newsrc)

840 # as hidden so that it does not show up in subsequent scans 

841 util.ioretry(lambda: self._mark_hidden(newsrc)) 

842 

843 #Verify parent locator field of both children and delete newsrc if unused 

844 introduce_parent = True 

845 try: 

846 srcparent = util.ioretry(lambda: self._query_p_uuid(src)) 

847 dstparent = None 

848 if snap_type == VDI.SNAPSHOT_DOUBLE:

849 dstparent = util.ioretry(lambda: self._query_p_uuid(dst)) 

850 if srcparent != newuuid and \

851 (snap_type == VDI.SNAPSHOT_SINGLE or \ 

852 snap_type == VDI.SNAPSHOT_INTERNAL or \ 

853 dstparent != newuuid): 

854 util.ioretry(lambda: self._unlink(newsrc)) 

855 introduce_parent = False 

856 except: 

857 pass 

858 

859 # Introduce the new VDI records 

860 leaf_vdi = None 

861 if snap_type == VDI.SNAPSHOT_DOUBLE:

862 leaf_vdi = VDI.VDI(self.sr, dest) # user-visible leaf VDI 

863 leaf_vdi.read_only = False 

864 leaf_vdi.location = dest 

865 leaf_vdi.size = self.size 

866 leaf_vdi.utilisation = self.utilisation 

867 leaf_vdi.sm_config = {} 

868 leaf_vdi.sm_config['vhd-parent'] = dstparent 

869 # If the parent is encrypted set the key_hash 

870 # for the new snapshot disk 

871 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

872 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

873 if "key_hash" in sm_config: 873 ↛ 874line 873 didn't jump to line 874, because the condition on line 873 was never true

874 leaf_vdi.sm_config['key_hash'] = sm_config['key_hash'] 

875 # If we have CBT enabled on the VDI, 

876 # set CBT status for the new snapshot disk 

877 if cbtlog:

878 leaf_vdi.cbt_enabled = True 

879 

880 base_vdi = None 

881 if introduce_parent:

882 base_vdi = VDI.VDI(self.sr, newuuid) # readonly parent 

883 base_vdi.label = "base copy" 

884 base_vdi.read_only = True 

885 base_vdi.location = newuuid 

886 base_vdi.size = self.size 

887 base_vdi.utilisation = self.utilisation 

888 base_vdi.sm_config = {} 

889 grandparent = util.ioretry(lambda: self._query_p_uuid(newsrc)) 

890 if grandparent.find("no parent") == -1:

891 base_vdi.sm_config['vhd-parent'] = grandparent 

892 

893 try: 

894 if snap_type == VDI.SNAPSHOT_DOUBLE:

895 leaf_vdi_ref = leaf_vdi._db_introduce() 

896 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % \ 

897 (leaf_vdi_ref, dest)) 

898 

899 if introduce_parent:

900 base_vdi_ref = base_vdi._db_introduce() 

901 self.session.xenapi.VDI.set_managed(base_vdi_ref, False) 

902 util.SMlog("vdi_clone: introduced VDI: %s (%s)" % (base_vdi_ref, newuuid)) 

903 vdi_ref = self.sr.srcmd.params['vdi_ref'] 

904 sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref) 

905 sm_config['vhd-parent'] = srcparent 

906 self.session.xenapi.VDI.set_sm_config(vdi_ref, sm_config) 

907 except Exception as e: 

908 util.SMlog("vdi_clone: caught error during VDI.db_introduce: %s" % (str(e))) 

909 # Note it's too late to actually clean stuff up here: the base disk has 

910 # been marked as deleted already. 

911 util.end_log_entry(self.sr.path, self.path, ["error"]) 

912 raise 

913 except util.CommandException as inst: 

914 # XXX: it might be too late if the base disk has been marked as deleted! 

915 self._clonecleanup(src, dst, newsrc) 

916 util.end_log_entry(self.sr.path, self.path, ["error"]) 

917 raise xs_errors.XenError('VDIClone', 

918 opterr='VDI clone failed error %d' % inst.code) 

919 

920 # Update cbt files if user created snapshot (SNAPSHOT_DOUBLE) 

921 if snap_type == VDI.SNAPSHOT_DOUBLE and cbtlog:

922 try: 

923 self._cbt_snapshot(dest, cbt_consistency) 

924 except: 

925 # CBT operation failed. 

926 util.end_log_entry(self.sr.path, self.path, ["error"]) 

927 raise 

928 

929 util.end_log_entry(self.sr.path, self.path, ["done"]) 

930 if snap_type != VDI.SNAPSHOT_INTERNAL:

931 self.sr._update(self.sr.uuid, self.size) 

932 # Return info on the new user-visible leaf VDI 

933 ret_vdi = leaf_vdi 

934 if not ret_vdi:

935 ret_vdi = base_vdi 

936 if not ret_vdi:

937 ret_vdi = self 

938 return ret_vdi.get_params() 

939 

940 @override 

941 def get_params(self) -> str: 

942 if not self._checkpath(self.path): 

943 raise xs_errors.XenError('VDIUnavailable', \ 

944 opterr='VDI %s unavailable %s' % (self.uuid, self.path)) 

945 return super(FileVDI, self).get_params() 

946 

947 def _snap(self, child, parent): 

948 cmd = [SR.TAPDISK_UTIL, "snapshot", vhdutil.VDI_TYPE_VHD, child, parent] 

949 text = util.pread(cmd) 

950 

951 def _clonecleanup(self, src, dst, newsrc): 

952 try: 

953 if dst:

954 util.ioretry(lambda: self._unlink(dst)) 

955 except util.CommandException as inst: 

956 pass 

957 try: 

958 if util.ioretry(lambda: util.pathexists(newsrc)):

959 stats = os.stat(newsrc) 

960 # Check if we have more than one link to newsrc 

961 if (stats.st_nlink > 1): 

962 util.ioretry(lambda: self._unlink(newsrc)) 

963 elif not self._is_hidden(newsrc):

964 self._rename(newsrc, src) 

965 except util.CommandException as inst: 

966 pass 

967 

968 def _checkpath(self, path): 

969 try: 

970 if not util.ioretry(lambda: util.pathexists(path)):

971 return False 

972 return True 

973 except util.CommandException as inst: 

974 raise xs_errors.XenError('EIO', \ 

975 opterr='IO error checking path %s' % path) 

976 

977 def _query_v(self, path): 

978 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-v", path] 

979 return int(util.pread(cmd)) * 1024 * 1024 

980 

981 def _query_p_uuid(self, path): 

982 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, "-p", path] 

983 parent = util.pread(cmd) 

984 parent = parent[:-1] 

985 ls = parent.split('/') 

986 return ls[len(ls) - 1].replace(vhdutil.FILE_EXTN_VHD, '') 

987 

988 def _query_info(self, path, use_bkp_footer=False): 

989 diskinfo = {} 

990 qopts = '-vpf' 

991 if use_bkp_footer:

992 qopts += 'b' 

993 cmd = [SR.TAPDISK_UTIL, "query", vhdutil.VDI_TYPE_VHD, qopts, path] 

994 txt = util.pread(cmd).split('\n') 

995 diskinfo['size'] = txt[0] 

996 lst = [txt[1].split('/')[-1].replace(vhdutil.FILE_EXTN_VHD, "")] 

997 for val in filter(util.exactmatch_uuid, lst):

998 diskinfo['parent'] = val 

999 diskinfo['hidden'] = txt[2].split()[1] 

1000 return diskinfo 

1001 

1002 def _create(self, size, path): 

1003 cmd = [SR.TAPDISK_UTIL, "create", vhdutil.VDI_TYPE_VHD, size, path] 

1004 text = util.pread(cmd) 

1005 if self.key_hash:

1006 vhdutil.setKey(path, self.key_hash) 

1007 

1008 def _mark_hidden(self, path): 

1009 vhdutil.setHidden(path, True) 

1010 self.hidden = 1 

1011 

1012 def _is_hidden(self, path): 

1013 return vhdutil.getHidden(path) == 1 

1014 

1015 def extractUuid(path): 

1016 fileName = os.path.basename(path) 

1017 uuid = fileName.replace(vhdutil.FILE_EXTN_VHD, "") 

1018 return uuid 

1019 extractUuid = staticmethod(extractUuid) 

1020 

1021 @override 

1022 def generate_config(self, sr_uuid, vdi_uuid) -> str: 

1023 """ 

1024 Generate the XML config required to attach and activate 

1025 a VDI for use when XAPI is not running. Attach and 

1026 activation is handled by vdi_attach_from_config below. 

1027 """ 

1028 util.SMlog("FileVDI.generate_config") 

1029 if not util.pathexists(self.path):

1030 raise xs_errors.XenError('VDIUnavailable') 

1031 resp = {} 

1032 resp['device_config'] = self.sr.dconf 

1033 resp['sr_uuid'] = sr_uuid 

1034 resp['vdi_uuid'] = vdi_uuid 

1035 resp['command'] = 'vdi_attach_from_config' 

1036 # Return the 'config' encoded within a normal XMLRPC response so that 

1037 # we can use the regular response/error parsing code. 

1038 config = xmlrpc.client.dumps(tuple([resp]), "vdi_attach_from_config") 

1039 return xmlrpc.client.dumps((config, ), "", True) 
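        # Sketch (assumption, not exercised here): a consumer can unpack this
        # with xmlrpc.client.loads() -- once on the outer method response and
        # once on the embedded config string -- to recover the resp dict and
        # the 'vdi_attach_from_config' method name.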

1040 

1041 @override 

1042 def attach_from_config(self, sr_uuid, vdi_uuid) -> str: 

1043 """ 

1044 Attach and activate a VDI using config generated by 

1045 vdi_generate_config above. This is used for cases such as 

1046 the HA state-file and the redo-log. 

1047 """ 

1048 util.SMlog("FileVDI.attach_from_config") 

1049 try: 

1050 if not util.pathexists(self.sr.path): 

1051 return self.sr.attach(sr_uuid) 

1052 except: 

1053 util.logException("FileVDI.attach_from_config") 

1054 raise xs_errors.XenError( 

1055 'SRUnavailable', 

1056 opterr='Unable to attach from config' 

1057 ) 

1058 return '' 

1059 

1060 @override 

1061 def _create_cbt_log(self) -> str: 

1062 # Create CBT log file 

1063 # Name: <vdi_uuid>.cbtlog 

1064 # Handle the case where the file already exists

1065 log_path = self._get_cbt_logpath(self.uuid) 

1066 open_file = open(log_path, "w+") 

1067 open_file.close() 

1068 return super(FileVDI, self)._create_cbt_log() 

1069 

1070 @override 

1071 def _delete_cbt_log(self) -> None: 

1072 logPath = self._get_cbt_logpath(self.uuid) 

1073 try: 

1074 os.remove(logPath) 

1075 except OSError as e: 

1076 if e.errno != errno.ENOENT: 

1077 raise 

1078 

1079 @override 

1080 def _cbt_log_exists(self, logpath) -> bool: 

1081 return util.pathexists(logpath) 

1082 

1083 

1084class SharedFileSR(FileSR): 

1085 """ 

1086 FileSR subclass for SRs that use shared network storage 

1087 """ 

1088 

1089 def _check_writable(self): 

1090 """ 

1091 Checks that the filesystem being used by the SR can be written to, 

1092 raising an exception if it can't. 

1093 """ 

1094 test_name = os.path.join(self.path, str(uuid4())) 

1095 try: 

1096 open(test_name, 'ab').close() 

1097 except OSError as e: 

1098 util.SMlog("Cannot write to SR file system: %s" % e) 

1099 raise xs_errors.XenError('SharedFileSystemNoWrite') 

1100 finally: 

1101 util.force_unlink(test_name) 

1102 

1103 def _raise_hardlink_error(self): 

1104 raise OSError(524, "Unknown error 524") 

1105 

1106 @override 

1107 def _check_hardlinks(self) -> bool: 

1108 hardlink_conf = self._read_hardlink_conf() 

1109 if hardlink_conf is not None:

1110 return hardlink_conf 

1111 

1112 test_name = os.path.join(self.path, str(uuid4())) 

1113 open(test_name, 'ab').close() 

1114 

1115 link_name = '%s.new' % test_name 

1116 try: 

1117 # XSI-1100: Let tests simulate failure of the link operation 

1118 util.fistpoint.activate_custom_fn( 

1119 "FileSR_fail_hardlink", 

1120 self._raise_hardlink_error) 

1121 

1122 os.link(test_name, link_name) 

1123 self._write_hardlink_conf(supported=True) 

1124 return True 

1125 except OSError: 

1126 self._write_hardlink_conf(supported=False) 

1127 

1128 msg = "File system for SR %s does not support hardlinks, crash " \ 

1129 "consistency of snapshots cannot be assured" % self.uuid 

1130 util.SMlog(msg, priority=util.LOG_WARNING) 

1131 # Note: session can be not set during attach/detach_from_config calls. 

1132 if self.session:

1133 try: 

1134 self.session.xenapi.message.create( 

1135 "sr_does_not_support_hardlinks", 2, "SR", self.uuid, 

1136 msg) 

1137 except XenAPI.Failure: 

1138 # Might already be set and checking has TOCTOU issues 

1139 pass 

1140 finally: 

1141 util.force_unlink(link_name) 

1142 util.force_unlink(test_name) 

1143 

1144 return False 

1145 

1146 def _get_hardlink_conf_path(self): 

1147 return os.path.join(self.path, 'sm-hardlink.conf') 

1148 

1149 def _read_hardlink_conf(self) -> Optional[bool]: 

1150 try: 

1151 with open(self._get_hardlink_conf_path(), 'r') as f: 

1152 try: 

1153 return bool(int(f.read())) 

1154 except Exception as e: 

1155 # If we can't read, assume the file is empty and test for hardlink support. 

1156 return None 

1157 except IOError as e: 

1158 if e.errno == errno.ENOENT: 

1159 # If the config file doesn't exist, assume we want to support hardlinks. 

1160 return None 

1161 util.SMlog('Failed to read hardlink conf: {}'.format(e)) 

1162 # Can be caused by a concurrent access, not a major issue. 

1163 return None 

1164 

1165 def _write_hardlink_conf(self, supported): 

1166 try: 

1167 with open(self._get_hardlink_conf_path(), 'w') as f: 

1168 f.write('1' if supported else '0') 

1169 except Exception as e: 

1170 # Can be caused by a concurrent access, not a major issue. 

1171 util.SMlog('Failed to write hardlink conf: {}'.format(e)) 

1172 

1173if __name__ == '__main__':

1174 SRCommand.run(FileSR, DRIVER_INFO) 

1175else: 

1176 SR.registerSR(FileSR)