Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class VmwareManagerImpl, method prepareSecondaryStorageStore.
@Override
public void prepareSecondaryStorageStore(String storageUrl, Long storeId) {
    Integer nfsVersion = imageStoreDetailsUtil.getNfsVersion(storeId);
    String mountPoint = getMountPoint(storageUrl, nfsVersion);
    GlobalLock lock = GlobalLock.getInternLock("prepare.systemvm");
    try {
        if (lock.lock(3600)) {
            try {
                File patchFolder = new File(mountPoint + "/systemvm");
                if (!patchFolder.exists()) {
                    if (!patchFolder.mkdirs()) {
                        String msg = "Unable to create systemvm folder on secondary storage. location: " + patchFolder.toString();
                        s_logger.error(msg);
                        throw new CloudRuntimeException(msg);
                    }
                }
                File srcIso = getSystemVMPatchIsoFile();
                File destIso = new File(mountPoint + "/systemvm/" + getSystemVMIsoFileNameOnDatastore());
                if (!destIso.exists()) {
                    s_logger.info("Inject SSH key pairs before copying systemvm.iso into secondary storage");
                    _configServer.updateKeyPairs();
                    s_logger.info("Copy System VM patch ISO file to secondary storage. source ISO: " + srcIso.getAbsolutePath() + ", destination: " + destIso.getAbsolutePath());
                    try {
                        FileUtil.copyfile(srcIso, destIso);
                    } catch (IOException e) {
                        s_logger.error("Unexpected exception ", e);
                        String msg = "Unable to copy systemvm ISO on secondary storage. src location: " + srcIso.toString() + ", dest location: " + destIso;
                        s_logger.error(msg);
                        throw new CloudRuntimeException(msg);
                    }
                } else {
                    if (s_logger.isTraceEnabled()) {
                        s_logger.trace("SystemVM ISO file " + destIso.getPath() + " already exists");
                    }
                }
            } finally {
                lock.unlock();
            }
        }
    } finally {
        lock.releaseRef();
    }
}
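This snippet shows the canonical GlobalLock idiom used throughout CloudStack: getInternLock obtains a reference-counted, named lock shared by every caller that passes the same name, lock(timeoutSeconds) blocks up to the given timeout, and unlock and releaseRef each sit in their own finally block so both the lock and the reference are released on every path. A minimal sketch of the idiom, assuming a placeholder lock name and work method:

import com.cloud.utils.db.GlobalLock;

public class GlobalLockTemplate {

    public void runExclusively() {
        // "example.work" is a placeholder; callers that pass the same name share one lock
        GlobalLock lock = GlobalLock.getInternLock("example.work");
        try {
            // Wait up to an hour for the lock, as prepareSecondaryStorageStore does;
            // cooperative periodic tasks (see cleanupStorage below) use a short
            // timeout instead and simply skip their turn when the lock is busy.
            if (lock.lock(3600)) {
                try {
                    doWork(); // placeholder for the critical section
                } finally {
                    lock.unlock(); // release the lock acquired above
                }
            }
        } finally {
            lock.releaseRef(); // drop the reference taken by getInternLock
        }
    }

    private void doWork() {
        // exclusive work goes here
    }
}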
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class StorageManagerImpl, method cleanupStorage.
@Override
public void cleanupStorage(boolean recurring) {
    GlobalLock scanLock = GlobalLock.getInternLock("storagemgr.cleanup");
    try {
        if (scanLock.lock(3)) {
            try {
                // Cleanup primary storage pools
                if (_templateCleanupEnabled) {
                    List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
                    for (StoragePoolVO pool : storagePools) {
                        try {
                            List<VMTemplateStoragePoolVO> unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(pool);
                            s_logger.debug("Storage pool garbage collector found " + unusedTemplatesInPool.size() + " templates to clean up in storage pool: " + pool.getName());
                            for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) {
                                if (templatePoolVO.getDownloadState() != VMTemplateStorageResourceAssoc.Status.DOWNLOADED) {
                                    s_logger.debug("Storage pool garbage collector is skipping template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId() + " because it is not completely downloaded.");
                                    continue;
                                }
                                if (!templatePoolVO.getMarkedForGC()) {
                                    templatePoolVO.setMarkedForGC(true);
                                    _vmTemplatePoolDao.update(templatePoolVO.getId(), templatePoolVO);
                                    s_logger.debug("Storage pool garbage collector has marked template with ID: " + templatePoolVO.getTemplateId() + " on pool " + templatePoolVO.getPoolId() + " for garbage collection.");
                                    continue;
                                }
                                _tmpltMgr.evictTemplateFromStoragePool(templatePoolVO);
                            }
                        } catch (Exception e) {
                            s_logger.warn("Problem cleaning up primary storage pool " + pool, e);
                        }
                    }
                }
                cleanupSecondaryStorage(recurring);
                List<VolumeVO> vols = _volsDao.listVolumesToBeDestroyed(new Date(System.currentTimeMillis() - ((long) StorageCleanupDelay.value() << 10)));
                for (VolumeVO vol : vols) {
                    try {
                        // If this fails, just log a warning. It's ideal if we clean up the host-side clustered file
                        // system, but not necessary.
                        handleManagedStorage(vol);
                    } catch (Exception e) {
                        s_logger.warn("Unable to destroy host-side clustered file system " + vol.getUuid(), e);
                    }
                    try {
                        VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
                        if (volumeInfo != null) {
                            volService.expungeVolumeAsync(volumeInfo);
                        } else {
                            s_logger.debug("Volume " + vol.getUuid() + " is already destroyed");
                        }
                    } catch (Exception e) {
                        s_logger.warn("Unable to destroy volume " + vol.getUuid(), e);
                    }
                }
                // remove snapshots in Error state
                List<SnapshotVO> snapshots = _snapshotDao.listAllByStatus(Snapshot.State.Error);
                for (SnapshotVO snapshotVO : snapshots) {
                    try {
                        List<SnapshotDataStoreVO> storeRefs = _snapshotStoreDao.findBySnapshotId(snapshotVO.getId());
                        for (SnapshotDataStoreVO ref : storeRefs) {
                            _snapshotStoreDao.expunge(ref.getId());
                        }
                        _snapshotDao.expunge(snapshotVO.getId());
                    } catch (Exception e) {
                        s_logger.warn("Unable to destroy snapshot " + snapshotVO.getUuid(), e);
                    }
                }
                // destroy uploaded volumes in abandoned/error state
                List<VolumeDataStoreVO> volumeDataStores = _volumeDataStoreDao.listByVolumeState(Volume.State.UploadError, Volume.State.UploadAbandoned);
                for (VolumeDataStoreVO volumeDataStore : volumeDataStores) {
                    VolumeVO volume = _volumeDao.findById(volumeDataStore.getVolumeId());
                    if (volume == null) {
                        s_logger.warn("Uploaded volume with id " + volumeDataStore.getVolumeId() + " not found, so cannot be destroyed");
                        continue;
                    }
                    try {
                        DataStore dataStore = _dataStoreMgr.getDataStore(volumeDataStore.getDataStoreId(), DataStoreRole.Image);
                        EndPoint ep = _epSelector.select(dataStore, volumeDataStore.getExtractUrl());
                        if (ep == null) {
                            s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded volume " + volume.getUuid());
                            continue;
                        }
                        Host host = _hostDao.findById(ep.getId());
                        if (host != null && host.getManagementServerId() != null) {
                            if (_serverId == host.getManagementServerId().longValue()) {
                                if (!volService.destroyVolume(volume.getId())) {
                                    s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid());
                                    continue;
                                }
                                // decrement volume resource count
                                _resourceLimitMgr.decrementResourceCount(volume.getAccountId(), ResourceType.volume, volume.isDisplayVolume());
                                // expunge volume from secondary if volume is on image store
                                VolumeInfo volOnSecondary = volFactory.getVolume(volume.getId(), DataStoreRole.Image);
                                if (volOnSecondary != null) {
                                    s_logger.info("Expunging volume " + volume.getUuid() + " uploaded using HTTP POST from secondary data store");
                                    AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volOnSecondary);
                                    VolumeApiResult result = future.get();
                                    if (!result.isSuccess()) {
                                        s_logger.warn("Failed to expunge volume " + volume.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult());
                                    }
                                }
                            }
                        }
                    } catch (Throwable th) {
                        s_logger.warn("Unable to destroy uploaded volume " + volume.getUuid() + ". Error details: " + th.getMessage());
                    }
                }
                // destroy uploaded templates in abandoned/error state
                List<TemplateDataStoreVO> templateDataStores = _templateStoreDao.listByTemplateState(VirtualMachineTemplate.State.UploadError, VirtualMachineTemplate.State.UploadAbandoned);
                for (TemplateDataStoreVO templateDataStore : templateDataStores) {
                    VMTemplateVO template = _templateDao.findById(templateDataStore.getTemplateId());
                    if (template == null) {
                        s_logger.warn("Uploaded template with id " + templateDataStore.getTemplateId() + " not found, so cannot be destroyed");
                        continue;
                    }
                    try {
                        DataStore dataStore = _dataStoreMgr.getDataStore(templateDataStore.getDataStoreId(), DataStoreRole.Image);
                        EndPoint ep = _epSelector.select(dataStore, templateDataStore.getExtractUrl());
                        if (ep == null) {
                            s_logger.warn("There is no secondary storage VM for image store " + dataStore.getName() + ", cannot destroy uploaded template " + template.getUuid());
                            continue;
                        }
                        Host host = _hostDao.findById(ep.getId());
                        if (host != null && host.getManagementServerId() != null) {
                            if (_serverId == host.getManagementServerId().longValue()) {
                                AsyncCallFuture<TemplateApiResult> future = _imageSrv.deleteTemplateAsync(tmplFactory.getTemplate(template.getId(), dataStore));
                                TemplateApiResult result = future.get();
                                if (!result.isSuccess()) {
                                    s_logger.warn("Failed to delete template " + template.getUuid() + " from the image store " + dataStore.getName() + " due to: " + result.getResult());
                                    continue;
                                }
                                // remove from template_zone_ref
                                List<VMTemplateZoneVO> templateZones = _vmTemplateZoneDao.listByZoneTemplate(((ImageStoreEntity) dataStore).getDataCenterId(), template.getId());
                                if (templateZones != null) {
                                    for (VMTemplateZoneVO templateZone : templateZones) {
                                        _vmTemplateZoneDao.remove(templateZone.getId());
                                    }
                                }
                                // mark all the occurrences of this template in the given store as destroyed
                                _templateStoreDao.removeByTemplateStore(template.getId(), dataStore.getId());
                                // find all eligible image stores for this template
                                List<DataStore> imageStores = _tmpltMgr.getImageStoreByTemplate(template.getId(), null);
                                if (imageStores == null || imageStores.size() == 0) {
                                    template.setState(VirtualMachineTemplate.State.Inactive);
                                    _templateDao.update(template.getId(), template);
                                    // decrement template resource count
                                    _resourceLimitMgr.decrementResourceCount(template.getAccountId(), ResourceType.template);
                                }
                            }
                        }
                    } catch (Throwable th) {
                        s_logger.warn("Unable to destroy uploaded template " + template.getUuid() + ". Error details: " + th.getMessage());
                    }
                }
            } finally {
                scanLock.unlock();
            }
        }
    } finally {
        scanLock.releaseRef();
    }
}
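Two details are worth noting here. First, scanLock.lock(3) waits at most three seconds, so if another management server already holds storagemgr.cleanup, this server simply skips the round rather than queueing behind it. Second, a template is not evicted the first time the garbage collector sees it unused: the first pass only sets the marked-for-GC flag, and eviction happens on a later pass if the flag is still set, giving a template that comes back into use a grace period (the flag is presumably cleared elsewhere when the template is reused). A hedged, type-simplified sketch of that two-pass scheme, where Ref and evict are placeholders for VMTemplateStoragePoolVO and _tmpltMgr.evictTemplateFromStoragePool:

import java.util.List;
import java.util.function.Consumer;

public class MarkThenEvict {

    // Placeholder abstraction over VMTemplateStoragePoolVO's marked-for-GC flag
    public interface Ref {
        boolean isMarkedForGc();
        void setMarkedForGc(boolean marked);
    }

    public static void collect(List<Ref> unused, Consumer<Ref> evict) {
        for (Ref ref : unused) {
            if (!ref.isMarkedForGc()) {
                ref.setMarkedForGc(true); // first sighting: mark only, granting a grace period
                continue;
            }
            evict.accept(ref); // still unused on a later pass: actually evict
        }
    }
}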
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class AsyncJobManagerImpl, method getGCTask.
@DB
private Runnable getGCTask() {
    return new ManagedContextRunnable() {
        @Override
        protected void runInContext() {
            GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC");
            try {
                if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) {
                    try {
                        reallyRun();
                    } finally {
                        scanLock.unlock();
                    }
                }
            } finally {
                scanLock.releaseRef();
            }
        }

        public void reallyRun() {
            try {
                s_logger.info("Begin cleanup expired async-jobs");
                // forcefully cancel blocking queue items if they've been staying there for too long
                List<SyncQueueItemVO> blockItems = _queueMgr.getBlockedQueueItems(JobCancelThresholdMinutes.value() * 60000, false);
                if (blockItems != null && blockItems.size() > 0) {
                    for (SyncQueueItemVO item : blockItems) {
                        try {
                            if (item.getContentType().equalsIgnoreCase(SyncQueueItem.AsyncJobContentType)) {
                                s_logger.info("Remove Job-" + item.getContentId() + " from Queue-" + item.getId() + " since it has been blocked for too long");
                                completeAsyncJob(item.getContentId(), JobInfo.Status.FAILED, 0, "Job is cancelled as it has been blocking others for too long");
                                _jobMonitor.unregisterByJobId(item.getContentId());
                            }
                            // purge the item and resume queue processing
                            _queueMgr.purgeItem(item.getId());
                        } catch (Throwable e) {
                            s_logger.error("Unexpected exception when trying to remove job from sync queue, ", e);
                        }
                    }
                }
                Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - JobExpireMinutes.value() * 60000);
                // limit to 100 jobs per turn, which caps cleanup throughput at 600 jobs per minute;
                // hopefully that is fast enough to keep pace with growth of the job table
                // 1) Expire unfinished jobs that weren't processed yet
                List<AsyncJobVO> unfinishedJobs = _jobDao.getExpiredUnfinishedJobs(cutTime, 100);
                for (AsyncJobVO job : unfinishedJobs) {
                    try {
                        s_logger.info("Expunging unfinished job-" + job.getId());
                        _jobMonitor.unregisterByJobId(job.getId());
                        expungeAsyncJob(job);
                    } catch (Throwable e) {
                        s_logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e);
                    }
                }
                // 2) Expunge finished jobs
                List<AsyncJobVO> completedJobs = _jobDao.getExpiredCompletedJobs(cutTime, 100);
                for (AsyncJobVO job : completedJobs) {
                    try {
                        s_logger.info("Expunging completed job-" + job.getId());
                        expungeAsyncJob(job);
                    } catch (Throwable e) {
                        s_logger.error("Unexpected exception when trying to expunge job-" + job.getId(), e);
                    }
                }
                s_logger.info("End cleanup expired async-jobs");
            } catch (Throwable e) {
                s_logger.error("Unexpected exception when trying to execute queue item, ", e);
            }
        }
    };
}
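The 600-jobs-per-minute figure in the comment implies the returned Runnable is scheduled roughly every ten seconds (100 jobs per pass times six passes per minute). The executor wiring is not part of this excerpt; a hedged usage sketch, where the pool size, initial delay, and period are assumptions chosen to match that arithmetic:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical wiring, not the class's actual configure() code: a ten-second
// period at up to 100 jobs per pass yields the 600-jobs-per-minute figure.
private void scheduleGCTask() {
    ScheduledExecutorService gcExecutor = Executors.newScheduledThreadPool(1);
    gcExecutor.scheduleAtFixedRate(getGCTask(), 60, 10, TimeUnit.SECONDS);
}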
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class SolidFireSharedPrimaryDataStoreLifeCycle, method deleteDataStore.
// invoked to delete primary storage that is based on the SolidFire plug-in
@Override
public boolean deleteDataStore(DataStore dataStore) {
List<StoragePoolHostVO> hostPoolRecords = _storagePoolHostDao.listByPoolId(dataStore.getId());
HypervisorType hypervisorType = null;
if (hostPoolRecords.size() > 0) {
hypervisorType = getHypervisorType(hostPoolRecords.get(0).getHostId());
}
if (!isSupportedHypervisorType(hypervisorType)) {
throw new CloudRuntimeException(hypervisorType + " is not a supported hypervisor type.");
}
StoragePool storagePool = (StoragePool) dataStore;
StoragePoolVO storagePoolVO = _primaryDataStoreDao.findById(storagePool.getId());
List<VMTemplateStoragePoolVO> unusedTemplatesInPool = _tmpltMgr.getUnusedTemplatesInPool(storagePoolVO);
for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) {
_tmpltMgr.evictTemplateFromStoragePool(templatePoolVO);
}
Long clusterId = null;
for (StoragePoolHostVO host : hostPoolRecords) {
DeleteStoragePoolCommand deleteCmd = new DeleteStoragePoolCommand(storagePool);
if (HypervisorType.VMware.equals(hypervisorType)) {
deleteCmd.setRemoveDatastore(true);
Map<String, String> details = new HashMap<String, String>();
StoragePoolDetailVO storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.DATASTORE_NAME);
details.put(DeleteStoragePoolCommand.DATASTORE_NAME, storagePoolDetail.getValue());
storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.IQN);
details.put(DeleteStoragePoolCommand.IQN, storagePoolDetail.getValue());
storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.STORAGE_VIP);
details.put(DeleteStoragePoolCommand.STORAGE_HOST, storagePoolDetail.getValue());
storagePoolDetail = _storagePoolDetailsDao.findDetail(storagePool.getId(), SolidFireUtil.STORAGE_PORT);
details.put(DeleteStoragePoolCommand.STORAGE_PORT, storagePoolDetail.getValue());
deleteCmd.setDetails(details);
}
final Answer answer = _agentMgr.easySend(host.getHostId(), deleteCmd);
if (answer != null && answer.getResult()) {
s_logger.info("Successfully deleted storage pool using Host ID " + host.getHostId());
HostVO hostVO = _hostDao.findById(host.getHostId());
if (hostVO != null) {
clusterId = hostVO.getClusterId();
}
break;
} else {
s_logger.error("Failed to delete storage pool using Host ID " + host.getHostId() + ": " + answer.getResult());
}
}
if (clusterId != null) {
ClusterVO cluster = _clusterDao.findById(clusterId);
GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
if (!lock.lock(SolidFireUtil.s_lockTimeInSeconds)) {
String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
s_logger.debug(errMsg);
throw new CloudRuntimeException(errMsg);
}
try {
removeVolumeFromVag(storagePool.getId(), clusterId);
} finally {
lock.unlock();
lock.releaseRef();
}
}
deleteSolidFireVolume(storagePool.getId());
return _primaryDataStoreHelper.deletePrimaryDataStore(dataStore);
}
Use of com.cloud.utils.db.GlobalLock in project cloudstack by apache.
The class SolidFireUtil, method hostAddedToOrRemovedFromCluster.
public static void hostAddedToOrRemovedFromCluster(long hostId, long clusterId, boolean added, String storageProvider, ClusterDao clusterDao,
        ClusterDetailsDao clusterDetailsDao, PrimaryDataStoreDao storagePoolDao, StoragePoolDetailsDao storagePoolDetailsDao, HostDao hostDao) {
    ClusterVO cluster = clusterDao.findById(clusterId);
    GlobalLock lock = GlobalLock.getInternLock(cluster.getUuid());
    if (!lock.lock(s_lockTimeInSeconds)) {
        String errMsg = "Couldn't lock the DB on the following string: " + cluster.getUuid();
        s_logger.debug(errMsg);
        throw new CloudRuntimeException(errMsg);
    }
    try {
        List<StoragePoolVO> storagePools = storagePoolDao.findPoolsByProvider(storageProvider);
        if (storagePools != null && storagePools.size() > 0) {
            List<SolidFireUtil.SolidFireConnection> sfConnections = new ArrayList<>();
            for (StoragePoolVO storagePool : storagePools) {
                ClusterDetailsVO clusterDetail = clusterDetailsDao.findDetail(clusterId, SolidFireUtil.getVagKey(storagePool.getId()));
                String vagId = clusterDetail != null ? clusterDetail.getValue() : null;
                if (vagId != null) {
                    SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePool.getId(), storagePoolDetailsDao);
                    if (!sfConnections.contains(sfConnection)) {
                        sfConnections.add(sfConnection);
                        SolidFireUtil.SolidFireVag sfVag = SolidFireUtil.getVag(sfConnection, Long.parseLong(vagId));
                        List<HostVO> hostsToAddOrRemove = new ArrayList<>();
                        HostVO hostToAddOrRemove = hostDao.findByIdIncludingRemoved(hostId);
                        hostsToAddOrRemove.add(hostToAddOrRemove);
                        String[] hostIqns = SolidFireUtil.getNewHostIqns(sfVag.getInitiators(), SolidFireUtil.getIqnsFromHosts(hostsToAddOrRemove), added);
                        SolidFireUtil.modifyVag(sfConnection, sfVag.getId(), hostIqns, sfVag.getVolumeIds());
                    }
                }
            }
        }
    } finally {
        lock.unlock();
        lock.releaseRef();
    }
}
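Both SolidFire examples use the same fail-fast variant of the GlobalLock idiom: the lock name is the cluster's UUID, so operations against different clusters never contend, and failure to acquire the lock aborts the operation with a CloudRuntimeException rather than skipping it. A minimal sketch of that variant, with the timeout constant and work method as placeholders:

import com.cloud.utils.db.GlobalLock;
import com.cloud.utils.exception.CloudRuntimeException;

public class PerClusterLockTemplate {

    // Placeholder value standing in for SolidFireUtil.s_lockTimeInSeconds
    private static final int LOCK_TIME_IN_SECONDS = 300;

    public void runForCluster(String clusterUuid) {
        // One lock per cluster: operations on different clusters proceed in parallel
        GlobalLock lock = GlobalLock.getInternLock(clusterUuid);
        if (!lock.lock(LOCK_TIME_IN_SECONDS)) {
            lock.releaseRef(); // drop the reference even on failure (the excerpts above omit this)
            throw new CloudRuntimeException("Couldn't lock the DB on the following string: " + clusterUuid);
        }
        try {
            updateVag(clusterUuid); // placeholder for the cluster-scoped work
        } finally {
            lock.unlock();
            lock.releaseRef();
        }
    }

    private void updateVag(String clusterUuid) {
        // cluster-scoped work goes here
    }
}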