Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CephSnapshotOperations, method deleteSingleVolumeSnapshot.
@Override
public void deleteSingleVolumeSnapshot(StorageSystem storage, URI snapshot, TaskCompleter taskCompleter)
        throws DeviceControllerException {
    try (CephClient cephClient = getClient(storage)) {
        BlockSnapshot blockSnapshot = _dbClient.queryObject(BlockSnapshot.class, snapshot);
        Volume volume = _dbClient.queryObject(Volume.class, blockSnapshot.getParent().getURI());
        StoragePool pool = _dbClient.queryObject(StoragePool.class, volume.getPool());
        cephClient.deleteSnap(pool.getPoolName(), volume.getNativeId(), blockSnapshot.getNativeId());
        blockSnapshot.setInactive(true);
        _dbClient.updateObject(blockSnapshot);
        taskCompleter.ready(_dbClient);
    } catch (Exception e) {
        _log.error("Snapshot deletion failed", e);
        ServiceError error = DeviceControllerErrors.ceph.operationFailed("deleteSingleVolumeSnapshot", e.getMessage());
        taskCompleter.error(_dbClient, error);
    }
}
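Each usage on this page follows the same lookup pattern: the StoragePool URI is read from the owning Volume (or file system) and resolved through DbClient.queryObject before its array-side identifiers are handed to the backend driver. Below is a minimal sketch of that pattern, built only from calls that appear in these snippets; the helper name resolvePoolForVolume is hypothetical.

// Hypothetical helper illustrating the StoragePool lookup shared by the usages on this page.
// dbClient and the model classes are the CoprHD ones referenced above.
private StoragePool resolvePoolForVolume(DbClient dbClient, Volume volume) {
    // volume.getPool() holds the URI of the pool the volume was provisioned from
    StoragePool pool = dbClient.queryObject(StoragePool.class, volume.getPool());
    // drivers address the pool by its array-side identifiers, e.g. pool.getPoolName()
    // for Ceph or pool.getNativeId() for Cinder and Data Domain
    return pool;
}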
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CinderCloneOperations, method createSingleClone.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.CloneOperations#createSingleClone(
* com.emc.storageos.db.client.model.StorageSystem, java.net.URI, java.net.URI,
* java.lang.Boolean,
* com.emc.storageos.volumecontroller.TaskCompleter)
*/
@Override
public void createSingleClone(StorageSystem storageSystem, URI sourceObject, URI cloneVolume,
        Boolean createInactive, TaskCompleter taskCompleter) {
    log.info("START createSingleClone operation");
    boolean isVolumeClone = true;
    try {
        BlockObject sourceObj = BlockObject.fetch(dbClient, sourceObject);
        URI tenantUri = null;
        if (sourceObj instanceof BlockSnapshot) {
            // In case of a snapshot, get the tenant from its parent volume
            NamedURI parentVolUri = ((BlockSnapshot) sourceObj).getParent();
            Volume parentVolume = dbClient.queryObject(Volume.class, parentVolUri);
            tenantUri = parentVolume.getTenant().getURI();
            isVolumeClone = false;
        } else {
            // This is the default flow
            tenantUri = ((Volume) sourceObj).getTenant().getURI();
            isVolumeClone = true;
        }
        Volume cloneObj = dbClient.queryObject(Volume.class, cloneVolume);
        StoragePool targetPool = dbClient.queryObject(StoragePool.class, cloneObj.getPool());
        TenantOrg tenantOrg = dbClient.queryObject(TenantOrg.class, tenantUri);
        // String cloneLabel = generateLabel(tenantOrg, cloneObj);
        CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
        log.info("Getting the Cinder API for the provider with id " + storageSystem.getActiveProviderURI());
        CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);
        String volumeId = "";
        if (isVolumeClone) {
            volumeId = cinderApi.cloneVolume(cloneObj.getLabel(),
                    (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                    targetPool.getNativeId(), sourceObj.getNativeId());
        } else {
            volumeId = cinderApi.createVolumeFromSnapshot(cloneObj.getLabel(),
                    (cloneObj.getCapacity() / (1024 * 1024 * 1024)),
                    targetPool.getNativeId(), sourceObj.getNativeId());
        }
        log.debug("Creating volume with the id " + volumeId + " on OpenStack Cinder node");
        if (volumeId != null) {
            // Cinder volume/snapshot clones are not kept in sync with the source, so
            // set the replication state as DETACHED
            cloneObj.setReplicaState(ReplicationState.DETACHED.name());
            dbClient.persistObject(cloneObj);
            Map<String, URI> volumeIds = new HashMap<String, URI>();
            volumeIds.put(volumeId, cloneObj.getId());
            ControllerServiceImpl.enqueueJob(new QueueJob(new CinderSingleVolumeCreateJob(
                    volumeId, cloneObj.getLabel(), storageSystem.getId(),
                    CinderConstants.ComponentType.volume.name(), ep, taskCompleter,
                    targetPool.getId(), volumeIds)));
        }
    } catch (InternalException e) {
        String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
        log.error(errorMsg, e);
        taskCompleter.error(dbClient, e);
    } catch (Exception e) {
        String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
        log.error(errorMsg, e);
        ServiceError serviceError = DeviceControllerErrors.cinder.operationFailed("createSingleClone", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
    }
}
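The size handed to Cinder is in gibibytes, which is why the clone's byte capacity is divided by 1024 * 1024 * 1024 before the cloneVolume or createVolumeFromSnapshot call. A small worked example of that conversion (the local variables are illustrative, not part of the project code):

// Illustrative only: Cinder takes volume sizes in GiB, while the ViPR Volume stores capacity in bytes.
long capacityInBytes = 10L * 1024 * 1024 * 1024;         // cloneObj.getCapacity() for a 10 GiB clone
long sizeInGb = capacityInBytes / (1024 * 1024 * 1024);  // value passed to cinderApi.cloneVolume(...)
// sizeInGb == 10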
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class AbstractCinderVolumeCreateJob, method updateStatus.
/**
* Called to update the job status when the volume create job completes.
* This is common update code for volume create operations.
*
* @param jobContext The job context.
*/
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Do nothing if the job is not completed yet
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(
                String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        // If in a terminal state, update the storage pool capacity and remove the reservation
        // for the volumes' capacity from the pool's reserved capacity map.
        StoragePool storagePool = null;
        if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            for (URI volumeId : getTaskCompleter().getIds()) {
                // remove from reservation map
                reservationMap.remove(volumeId.toString());
            }
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            List<URI> volumes = new ArrayList<URI>();
            Calendar now = Calendar.getInstance();
            URI volumeId = getTaskCompleter().getId();
            volumes.add(volumeId);
            for (Map.Entry<String, URI> entry : volumeIds.entrySet()) {
                VolumeShowResponse volumeDetails = cinderApi.showVolume(entry.getKey());
                processVolume(entry.getValue(), volumeDetails, dbClient, now, logMsgBuilder);
                // Adjust the storage pool's capacity
                CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, volumeDetails.volume.size, false);
            }
        } else if (status == JobStatus.FAILED) {
            for (URI id : getTaskCompleter().getIds()) {
                logMsgBuilder.append("\n");
                logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
                Volume volume = dbClient.queryObject(Volume.class, id);
                volume.setInactive(true);
                dbClient.persistObject(volume);
            }
        }
        logger.info(logMsgBuilder.toString());
    } catch (Exception e) {
        logger.error("Caught an exception while trying to updateStatus for CinderCreateVolumeJob", e);
        setErrorStatus("Encountered an internal error during volume create job status processing: " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
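When the job reaches a terminal state, the capacity reserved against the pool for the pending volumes is released before the pool is persisted. A minimal sketch of that bookkeeping, using only the calls shown above; the helper name releaseReservations is hypothetical.

// Hypothetical helper: drop per-volume reservations from the pool's reserved-capacity
// map once a create job has finished, whether it succeeded or failed.
private void releaseReservations(DbClient dbClient, StoragePool storagePool, List<URI> volumeIds) {
    StringMap reservationMap = storagePool.getReservedCapacityMap();
    for (URI volumeId : volumeIds) {
        // entries are keyed by the volume URI string
        reservationMap.remove(volumeId.toString());
    }
    dbClient.persistObject(storagePool);
}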
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class CinderVolumeExpandJob, method updateStatus.
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Do nothing if the job is not completed yet
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        _logger.info(String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        URI volumeId = getTaskCompleter().getId();
        // If in a terminal state, update the storage pool capacity and remove the reservation
        // for the volume's capacity from the pool's reserved capacity map.
        StoragePool storagePool = null;
        if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            // remove from reservation map
            reservationMap.remove(volumeId.toString());
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
            Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
            long oldCapacity = volume.getCapacity();
            long newCapacity = taskCompleter.getSize();
            // set requested capacity
            volume.setCapacity(newCapacity / CinderConstants.BYTES_TO_GB);
            volume.setProvisionedCapacity(taskCompleter.getSize());
            volume.setAllocatedCapacity(taskCompleter.getSize());
            dbClient.persistObject(volume);
            long increasedCapacity = newCapacity - oldCapacity;
            CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool,
                    String.valueOf(increasedCapacity / CinderConstants.BYTES_TO_GB), false);
        }
    } catch (Exception e) {
        _logger.error("Caught an exception while trying to updateStatus for CinderExpandVolumeJob", e);
        setErrorStatus("Encountered an internal error during expand volume job status processing: " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
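After a successful expand, the pool capacity is adjusted by the difference between the new and old volume sizes, converted to GiB via CinderConstants.BYTES_TO_GB before being passed to CinderUtils.updateStoragePoolCapacity. A worked example of that delta calculation, assuming BYTES_TO_GB equals 1024 * 1024 * 1024:

// Illustrative only: expanding a volume from 10 GiB to 15 GiB.
long oldCapacity = 10L * 1024 * 1024 * 1024;   // volume.getCapacity() before the expand
long newCapacity = 15L * 1024 * 1024 * 1024;   // taskCompleter.getSize()
long increasedCapacity = newCapacity - oldCapacity;
String deltaInGb = String.valueOf(increasedCapacity / (1024L * 1024 * 1024)); // "5", handed to updateStoragePoolCapacity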
Use of com.emc.storageos.db.client.model.StoragePool in project coprhd-controller by CoprHD.
The class DataDomainFileStorageDevice, method doUnexport.
@Override
public BiosCommandResult doUnexport(StorageSystem storage, FileDeviceInputOutput args, List<FileExport> exportList)
        throws ControllerException {
    try {
        _log.info("DataDomainFileStorageDevice doUnexport {} - start", args.getFsId());
        DataDomainClient ddClient = getDataDomainClient(storage);
        if (ddClient == null) {
            _log.error("doUnexport failed, provider unreachable");
            String op = "FS unexport";
            return BiosCommandResult.createErrorResult(
                    DeviceControllerErrors.datadomain.operationFailedProviderInaccessible(op));
        }
        URI storagePoolId = args.getFs().getPool();
        StoragePool storagePool = _dbClient.queryObject(StoragePool.class, storagePoolId);
        FSExportMap currentExports = args.getFsExports();
        ddDeleteExports(ddClient, storagePool.getNativeId(), currentExports, exportList);
        _log.info("DataDomainFileStorageDevice doUnexport {} - complete", args.getFsId());
        return BiosCommandResult.createSuccessfulResult();
    } catch (DataDomainApiException e) {
        _log.error("doUnexport failed, device error.", e);
        return BiosCommandResult.createErrorResult(e);
    }
}
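Here the StoragePool's native id is what ddDeleteExports uses to address the file system's location on the Data Domain system, so the pool is resolved from the file share before the exports are removed. A minimal sketch of that lookup, using only calls that appear in the snippet; the helper name getPoolNativeId is hypothetical.

// Hypothetical helper: resolve the array-side id of the pool a file system lives on.
private String getPoolNativeId(DbClient dbClient, FileDeviceInputOutput args) {
    URI storagePoolId = args.getFs().getPool();
    StoragePool storagePool = dbClient.queryObject(StoragePool.class, storagePoolId);
    return storagePool.getNativeId();
}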