Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class HDSDeleteVolumeJob, method updateStatus.
/**
 * Called to update the job status when the volume delete job completes.
 *
 * No-op while the job is still IN_PROGRESS. On a terminal status (SUCCESS or
 * FAILED) the capacity of every storage pool that held one of the deleted
 * volumes is refreshed from the array, and a per-volume summary of the
 * outcome is written to the log. The base class status handling always runs
 * last, even when post-processing fails.
 *
 * @param jobContext The job context.
 * @throws Exception if the base class status update fails.
 */
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        if (_status == JobStatus.IN_PROGRESS) {
            return;
        }
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(
                HDSUtils.getHDSServerManagementServerInfo(storageSystem),
                storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        // Collect the still-active volumes for this task and the distinct set
        // of storage pools they belong to, so pool capacity can be refreshed.
        List<Volume> volumes = new ArrayList<Volume>();
        Set<URI> poolURIs = new HashSet<URI>();
        for (URI id : getTaskCompleter().getIds()) {
            // BlockObject.fetch resolves the block object for the id; cast is
            // safe because delete jobs are only created for volumes.
            Volume volume = (Volume) BlockObject.fetch(dbClient, id);
            if (volume != null && !volume.getInactive()) {
                volumes.add(volume);
                poolURIs.add(volume.getPool());
            }
        }
        // Terminal state: refresh the capacity of each affected storage pool.
        if (_status == JobStatus.SUCCESS || _status == JobStatus.FAILED) {
            for (URI poolURI : poolURIs) {
                StoragePool storagePool = dbClient.queryObject(StoragePool.class, poolURI);
                HDSUtils.updateStoragePoolCapacity(dbClient, hdsApiClient, storagePool);
            }
        }
        // Build one multi-line message summarizing the per-volume outcome.
        StringBuilder logMsgBuilder = new StringBuilder();
        if (_status == JobStatus.SUCCESS) {
            for (Volume volume : volumes) {
                if (logMsgBuilder.length() != 0) {
                    logMsgBuilder.append("\n");
                }
                logMsgBuilder.append(String.format("Successfully deleted volume %s", volume.getId()));
            }
        } else if (_status == JobStatus.FAILED) {
            // On failure, report every id the task was asked to delete, not
            // just those still resolvable as active volumes.
            for (URI id : getTaskCompleter().getIds()) {
                if (logMsgBuilder.length() != 0) {
                    logMsgBuilder.append("\n");
                }
                logMsgBuilder.append(String.format("Failed to delete volume: %s", id));
            }
        }
        if (logMsgBuilder.length() > 0) {
            _log.info(logMsgBuilder.toString());
        }
    } catch (Exception e) {
        // Record the post-processing failure on the job and log the cause.
        setPostProcessingErrorStatus(
                "Encountered an internal error during delete volume job status processing: " + e.getMessage());
        _log.error("Caught exception while handling updateStatus for delete volume job.", e);
    } finally {
        // Always let the base class finish its own status handling.
        super.updateStatus(jobContext);
    }
}
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class HDSVolumeExpandJob, method updateStatus.
/**
 * Called to update the job status when the volume expand job completes.
 *
 * No-op while the job is still IN_PROGRESS. On a terminal status (SUCCESS or
 * FAILED) the storage pool's capacity is refreshed and the volume's
 * reservation is released from the pool's reserved capacity map. On SUCCESS
 * the volume's capacity fields are updated from the expand result. The base
 * class status handling always runs last.
 *
 * @param jobContext
 *            The job context.
 * @throws Exception if the base class status update fails.
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    LogicalUnit logicalUnit = null;
    try {
        if (_status == JobStatus.IN_PROGRESS) {
            return;
        }
        DbClient dbClient = jobContext.getDbClient();
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        HDSApiClient hdsApiClient = jobContext.getHdsApiFactory().getClient(
                HDSUtils.getHDSServerManagementServerInfo(storageSystem),
                storageSystem.getSmisUserName(), storageSystem.getSmisPassword());
        // Terminal state: refresh pool capacity and release this volume's
        // entry from the pool's reserved capacity map.
        if (_status == JobStatus.SUCCESS || _status == JobStatus.FAILED) {
            StoragePool storagePool = dbClient.queryObject(StoragePool.class, storagePoolURI);
            HDSUtils.updateStoragePoolCapacity(dbClient, hdsApiClient, storagePool);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            URI volumeId = getTaskCompleter().getId();
            // Remove from reservation map.
            reservationMap.remove(volumeId.toString());
            // Consistent with the rest of the codebase: updateObject replaces
            // the deprecated persistObject.
            dbClient.updateObject(storagePool);
        }
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format(
                "Updating status of job %s to %s, task: %s", this.getJobName(), _status.name(), opId));
        if (_status == JobStatus.SUCCESS) {
            VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
            Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
            // Set requested capacity.
            volume.setCapacity(taskCompleter.getSize());
            // Set meta related properties.
            volume.setIsComposite(taskCompleter.isComposite());
            volume.setCompositionType(taskCompleter.getMetaVolumeType());
            logicalUnit = (LogicalUnit) _javaResult.getBean("logicalunit");
            if (null != logicalUnit) {
                // Array reports capacity in KB; convert to bytes.
                long capacityInBytes = (Long.valueOf(logicalUnit.getCapacityInKB())) * 1024L;
                volume.setProvisionedCapacity(capacityInBytes);
                volume.setAllocatedCapacity(capacityInBytes);
            }
            logMsgBuilder.append(String.format(
                    "%n Capacity: %s, Provisioned capacity: %s, Allocated Capacity: %s",
                    volume.getCapacity(), volume.getProvisionedCapacity(), volume.getAllocatedCapacity()));
            if (volume.getIsComposite()) {
                logMsgBuilder.append(String.format(
                        "%n Is Meta: %s, Total meta member capacity: %s, Meta member count %s, Meta member size: %s",
                        volume.getIsComposite(), volume.getTotalMetaMemberCapacity(),
                        volume.getMetaMemberCount(), volume.getMetaMemberSize()));
            }
            _log.info(logMsgBuilder.toString());
            dbClient.updateObject(volume);
            // Reset list of meta members native ids in WF data (when meta
            // is created meta members are removed from array).
            WorkflowService.getInstance().storeStepData(opId, new ArrayList<String>());
        }
    } catch (Exception e) {
        _log.error("Caught an exception while trying to updateStatus for HDSVolumeExpandJob", e);
        setErrorStatus("Encountered an internal error during volume expand job status processing : " + e.getMessage());
    } finally {
        // Always let the base class finish its own status handling.
        super.updateStatus(jobContext);
    }
}
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CephCloneOperations, method detachSingleClone.
/**
 * Detaches a full-copy clone from its source by flattening the backing Ceph
 * image, then unlinks the clone from the source volume in the database. If
 * the source snapshot was an internal interim snapshot and has no remaining
 * children, it is unprotected, deleted from Ceph, and marked for deletion.
 * On any failure, cleanUpCloneObjects performs compensating cleanup before
 * the task completer is marked with an error.
 *
 * @param storageSystem the Ceph storage system hosting the clone
 * @param cloneVolume URI of the clone Volume to detach
 * @param taskCompleter completer updated with ready/error when done
 */
@Override
public void detachSingleClone(StorageSystem storageSystem, URI cloneVolume, TaskCompleter taskCompleter) {
_log.info("START detachSingleClone operation");
try (CephClient cephClient = getClient(storageSystem)) {
Volume cloneObject = _dbClient.queryObject(Volume.class, cloneVolume);
String cloneId = cloneObject.getNativeId();
StoragePool pool = _dbClient.queryObject(StoragePool.class, cloneObject.getPool());
String poolId = pool.getPoolName();
// The clone's associated source is the snapshot it was cloned from; the
// snapshot's parent is the original volume.
BlockSnapshot sourceSnapshot = _dbClient.queryObject(BlockSnapshot.class, cloneObject.getAssociatedSourceVolume());
String snapshotId = sourceSnapshot.getNativeId();
Volume parentVolume = _dbClient.queryObject(Volume.class, sourceSnapshot.getParent());
String parentVolumeId = parentVolume.getNativeId();
try {
// Flatten image (detach Ceph volume from Ceph snapshot)
// http://docs.ceph.com/docs/master/rbd/rbd-snapshot/#getting-started-with-layering
cephClient.flattenImage(poolId, cloneId);
// Detach links
ReplicationUtils.removeDetachedFullCopyFromSourceFullCopiesList(cloneObject, _dbClient);
cloneObject.setAssociatedSourceVolume(NullColumnValueGetter.getNullURI());
cloneObject.setReplicaState(ReplicationState.DETACHED.name());
_dbClient.updateObject(cloneObject);
// Un-protect snapshot if it was the last child and delete internal interim snapshot
List<String> children = cephClient.getChildren(poolId, parentVolumeId, snapshotId);
if (children.isEmpty()) {
// Unprotect snapshot to enable deleting
if (cephClient.snapIsProtected(poolId, parentVolumeId, snapshotId)) {
cephClient.unprotectSnap(poolId, parentVolumeId, snapshotId);
}
// and should be deleted at the step of detaching during full copy creation workflow
if (sourceSnapshot.checkInternalFlags(Flag.INTERNAL_OBJECT)) {
cephClient.deleteSnap(poolId, parentVolumeId, snapshotId);
// Set to null to prevent handling in cleanUpCloneObjects
snapshotId = null;
_dbClient.markForDeletion(sourceSnapshot);
}
} else if (sourceSnapshot.checkInternalFlags(Flag.INTERNAL_OBJECT)) {
// If the snapshot (not interim) still has children, it may be used for another cloning right now
// So that log the warning for interim snapshot only
_log.warn("Could not delete interim snapshot {} because its Ceph snapshot {}@{} unexpectedly had another child", sourceSnapshot.getId(), parentVolumeId, snapshotId);
}
taskCompleter.ready(_dbClient);
} catch (Exception e) {
// Although detachSingleClone may be again called on error, it is better to remove objects now.
cleanUpCloneObjects(cephClient, poolId, cloneId, snapshotId, parentVolumeId, sourceSnapshot);
throw e;
}
} catch (Exception e) {
// NOTE(review): on failure the clone object is marked inactive here —
// presumably so a failed detach leaves no orphaned active clone; confirm
// this is intended when detachSingleClone is retried.
BlockObject obj = BlockObject.fetch(_dbClient, cloneVolume);
if (obj != null) {
obj.setInactive(true);
_dbClient.updateObject(obj);
}
_log.error("Encountered an exception", e);
ServiceCoded code = DeviceControllerErrors.ceph.operationFailed("detachSingleClone", e.getMessage());
taskCompleter.error(_dbClient, code);
}
}
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CephCloneOperations, method createSingleClone.
/**
 * Creates a single full-copy clone of a source volume or snapshot. Ceph can
 * only clone from snapshots, so when the source is a Volume an internal
 * interim snapshot is prepared (and created on the array if it has no native
 * id yet). The cloning snapshot is protected before cloning, as Ceph
 * requires. On any failure, cleanUpCloneObjects performs compensating
 * cleanup of whatever was created before the task completer is marked with
 * an error.
 *
 * @param storageSystem the Ceph storage system to create the clone on
 * @param source URI of the source BlockSnapshot or Volume
 * @param cloneVolume URI of the pre-created clone Volume to populate
 * @param createInactive unused by this implementation as far as visible here
 * @param taskCompleter completer updated with ready/error when done
 */
@Override
public void createSingleClone(StorageSystem storageSystem, URI source, URI cloneVolume, Boolean createInactive, TaskCompleter taskCompleter) {
_log.info("START createSingleClone operation");
try (CephClient cephClient = getClient(storageSystem)) {
Volume cloneObject = _dbClient.queryObject(Volume.class, cloneVolume);
BlockObject sourceObject = BlockObject.fetch(_dbClient, source);
BlockSnapshot sourceSnapshot = null;
Volume parentVolume = null;
if (sourceObject instanceof BlockSnapshot) {
// Use source snapshot as clone source
sourceSnapshot = (BlockSnapshot) sourceObject;
parentVolume = _dbClient.queryObject(Volume.class, sourceSnapshot.getParent());
} else if (sourceObject instanceof Volume) {
// Use interim snapshot as clone source, since Ceph can clone snapshots only
// http://docs.ceph.com/docs/master/rbd/rbd-snapshot/#getting-started-with-layering
parentVolume = (Volume) sourceObject;
sourceSnapshot = prepareInternalSnapshotForVolume(parentVolume);
} else {
// Source is neither a snapshot nor a volume: fail the task early.
String msg = String.format("Unsupported block object type URI %s", source);
ServiceCoded code = DeviceControllerErrors.ceph.operationFailed("createSingleClone", msg);
taskCompleter.error(_dbClient, code);
return;
}
StoragePool pool = _dbClient.queryObject(StoragePool.class, parentVolume.getPool());
String poolId = pool.getPoolName();
String parentVolumeId = parentVolume.getNativeId();
String snapshotId = sourceSnapshot.getNativeId();
// cloneId stays null until the Ceph clone exists, so cleanup on error
// only touches what was actually created.
String cloneId = null;
try {
if (snapshotId == null || snapshotId.isEmpty()) {
// Create Ceph snapshot of volume requested to clone
snapshotId = CephUtils.createNativeId(sourceSnapshot);
cephClient.createSnap(poolId, parentVolumeId, snapshotId);
sourceSnapshot.setNativeId(snapshotId);
sourceSnapshot.setDeviceLabel(snapshotId);
sourceSnapshot.setIsSyncActive(true);
sourceSnapshot.setParent(new NamedURI(parentVolume.getId(), parentVolume.getLabel()));
_dbClient.updateObject(sourceSnapshot);
_log.info("Interim shapshot {} created for clone {}", sourceSnapshot.getId(), cloneObject.getId());
}
// Ceph requires cloning snapshot to be protected (from deleting)
if (!cephClient.snapIsProtected(poolId, parentVolumeId, snapshotId)) {
cephClient.protectSnap(poolId, parentVolumeId, snapshotId);
}
// Do cloning
String cloneVolumeId = CephUtils.createNativeId(cloneObject);
cephClient.cloneSnap(poolId, parentVolumeId, snapshotId, cloneVolumeId);
cloneId = cloneVolumeId;
// Update clone object
cloneObject.setDeviceLabel(cloneId);
cloneObject.setNativeId(cloneId);
cloneObject.setNativeGuid(NativeGUIDGenerator.generateNativeGuid(_dbClient, cloneObject));
cloneObject.setProvisionedCapacity(parentVolume.getProvisionedCapacity());
cloneObject.setAllocatedCapacity(parentVolume.getAllocatedCapacity());
cloneObject.setAssociatedSourceVolume(sourceSnapshot.getId());
_dbClient.updateObject(cloneObject);
// Finish task
taskCompleter.ready(_dbClient);
} catch (Exception e) {
// Clean up created objects
cleanUpCloneObjects(cephClient, poolId, cloneId, snapshotId, parentVolumeId, sourceSnapshot);
throw e;
}
} catch (Exception e) {
// Mark the clone object inactive so a failed create leaves no orphaned
// active volume in the database.
BlockObject obj = BlockObject.fetch(_dbClient, cloneVolume);
if (obj != null) {
obj.setInactive(true);
_dbClient.updateObject(obj);
}
_log.error("Encountered an exception", e);
ServiceCoded code = DeviceControllerErrors.ceph.operationFailed("createSingleClone", e.getMessage());
taskCompleter.error(_dbClient, code);
}
}
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CephSnapshotOperations, method createSingleVolumeSnapshot.
/**
 * Creates a single Ceph snapshot for the volume backing the given
 * BlockSnapshot, then records the native id on the snapshot object and
 * marks it sync-active. The task completer is marked ready on success or
 * with an error on any failure.
 *
 * @param storage the Ceph storage system to create the snapshot on
 * @param snapshot URI of the BlockSnapshot to realize on the array
 * @param createInactive unused by this implementation as far as visible here
 * @param readOnly unused by this implementation as far as visible here
 * @param taskCompleter completer updated with ready/error when done
 * @throws DeviceControllerException declared for interface compatibility
 */
@Override
public void createSingleVolumeSnapshot(StorageSystem storage, URI snapshot, Boolean createInactive, Boolean readOnly, TaskCompleter taskCompleter) throws DeviceControllerException {
    try (CephClient client = getClient(storage)) {
        // Resolve the snapshot object, its parent volume, and the pool the
        // volume lives in — these name the Ceph image to snapshot.
        BlockSnapshot snapObject = _dbClient.queryObject(BlockSnapshot.class, snapshot);
        Volume parentVolume = _dbClient.queryObject(Volume.class, snapObject.getParent().getURI());
        StoragePool parentPool = _dbClient.queryObject(StoragePool.class, parentVolume.getPool());
        // Create the snapshot on the array under a freshly generated native id.
        String nativeId = CephUtils.createNativeId(snapObject);
        client.createSnap(parentPool.getPoolName(), parentVolume.getNativeId(), nativeId);
        // Persist the array identity on the database object and activate it.
        snapObject.setNativeId(nativeId);
        snapObject.setDeviceLabel(snapObject.getLabel());
        snapObject.setIsSyncActive(true);
        _dbClient.updateObject(snapObject);
        taskCompleter.ready(_dbClient);
    } catch (Exception e) {
        _log.error("Snapshot creation failed", e);
        ServiceError error = DeviceControllerErrors.ceph.operationFailed("createSingleVolumeSnapshot", e.getMessage());
        taskCompleter.error(_dbClient, error);
    }
}
Aggregations