Use of com.emc.storageos.cinder.api.CinderApi in project coprhd-controller by CoprHD.
The class CinderStorageDevice, method doDeleteVolumes.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.BlockStorageDevice#doDeleteVolumes
* (com.emc.storageos.db.client.model.StorageSystem,
* java.lang.String,
* java.util.List,
* com.emc.storageos.volumecontroller.TaskCompleter)
*/
@Override
public void doDeleteVolumes(StorageSystem storageSystem, String opId, List<Volume> volumes, TaskCompleter taskCompleter) throws DeviceControllerException {
try {
List<String> volumeNativeIdsToDelete = new ArrayList<String>(volumes.size());
List<String> volumeLabels = new ArrayList<String>(volumes.size());
StringBuilder logMsgBuilder = new StringBuilder(String.format("Delete Volume Start - Array:%s", storageSystem.getSerialNumber()));
log.info(logMsgBuilder.toString());
MultiVolumeTaskCompleter multiVolumeTaskCompleter = (MultiVolumeTaskCompleter) taskCompleter;
CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
log.info("Getting the cinder APi for the provider with id " + storageSystem.getActiveProviderURI());
CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);
for (Volume volume : volumes) {
logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
try {
// Check if the volume is present on the back-end device
cinderApi.showVolume(volume.getNativeId());
} catch (CinderException ce) {
// This means the volume is not present on the back-end device
log.info(String.format("Volume %s is already deleted", volume.getNativeId()));
volume.setInactive(true);
dbClient.persistObject(volume);
VolumeTaskCompleter deleteTaskCompleter = multiVolumeTaskCompleter.skipTaskCompleter(volume.getId());
deleteTaskCompleter.ready(dbClient);
continue;
}
volumeNativeIdsToDelete.add(volume.getNativeId());
volumeLabels.add(volume.getLabel());
// cleanup if there are any snapshots created for a volume
cleanupAnyBackupSnapshots(volume, cinderApi);
}
// Now - trigger the delete
if (!multiVolumeTaskCompleter.isVolumeTaskCompletersEmpty()) {
cinderApi.deleteVolumes(volumeNativeIdsToDelete.toArray(new String[] {}));
ControllerServiceImpl.enqueueJob(new QueueJob(new CinderDeleteVolumeJob(
        volumeNativeIdsToDelete.get(0), volumeLabels.get(0),
        volumes.get(0).getStorageController(),
        CinderConstants.ComponentType.volume.name(), ep, taskCompleter)));
} else {
// If we are here, there are no volumes to delete, we have
// invoked ready() for the VolumeDeleteCompleter, and told
// the multiVolumeTaskCompleter to skip these completers.
// In this case, the multiVolumeTaskCompleter complete()
// method will not be invoked and the result is that the
// workflow that initiated this delete request will never
// be updated. So, here we just call complete() on the
// multiVolumeTaskCompleter to ensure the workflow status is
// updated.
multiVolumeTaskCompleter.ready(dbClient);
}
} catch (Exception e) {
log.error("Problem in doDeleteVolume: ", e);
ServiceError error = DeviceControllerErrors.cinder.operationFailed("doDeleteVolume", e.getMessage());
taskCompleter.error(dbClient, error);
}
StringBuilder logMsgBuilder = new StringBuilder(String.format("Delete Volume End - Array: %s", storageSystem.getSerialNumber()));
for (Volume volume : volumes) {
logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
}
log.info(logMsgBuilder.toString());
}
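A minimal sketch of the probe-then-delete pattern used above, assuming a CinderApi client obtained as in the method; the helper name volumeExistsOnBackend is illustrative and not part of the CoprHD source.
// Illustrative helper (hypothetical name): probe the back end with showVolume() and
// treat a CinderException as "the volume is already gone", mirroring the loop above.
private boolean volumeExistsOnBackend(CinderApi cinderApi, String nativeId) {
    try {
        // showVolume() throws a CinderException when the volume no longer exists on the back end
        cinderApi.showVolume(nativeId);
        return true;
    } catch (CinderException ce) {
        return false;
    }
}
Only volumes that still exist are collected into volumeNativeIdsToDelete and passed to cinderApi.deleteVolumes(...); the completers of the missing ones are marked ready immediately so the workflow is not left waiting.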
Use of com.emc.storageos.cinder.api.CinderApi in project coprhd-controller by CoprHD.
The class CinderJob, method poll.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.Job#poll(com.emc.storageos.volumecontroller.JobContext, long)
*/
@Override
public JobPollResult poll(JobContext jobContext, long trackingPeriodInMillis) {
String messageId = jobId;
try {
StorageSystem storageSystem = jobContext.getDbClient().queryObject(StorageSystem.class, storageSystemURI);
logger.info("CinderJob: Looking up job: id {}, provider: {} ", messageId, storageSystem.getActiveProviderURI());
CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), this.epInfo);
if (cinderApi == null) {
String errorMessage = "No Cinder client found for provider ip: " + storageSystem.getActiveProviderURI();
processTransientError(messageId, trackingPeriodInMillis, errorMessage, null);
} else {
// Gets the current status of the task (volume creation, snapshot creation, etc.)
String currentStatus = getCurrentStatus(cinderApi);
pollResult.setJobName(jobName);
pollResult.setJobId(jobId);
if (isJobSucceeded(currentStatus)) {
status = JobStatus.SUCCESS;
pollResult.setJobPercentComplete(100);
logger.info("CinderJob: {} succeeded", messageId);
} else if (isJobFailed(currentStatus)) {
status = JobStatus.FAILED;
pollResult.setJobPercentComplete(100);
logger.error("CinderJob: {} failed; Details: {}", jobName, errorDescription);
}
}
} catch (Exception e) {
processTransientError(messageId, trackingPeriodInMillis, e.getMessage(), e);
} finally {
try {
updateStatus(jobContext);
} catch (Exception e) {
setErrorStatus(e.getMessage());
logger.error("Problem while trying to update status", e);
}
}
pollResult.setJobStatus(status);
pollResult.setErrorDescription(errorDescription);
return pollResult;
}
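Both poll() and the device operations above resolve their Cinder client the same way. The following condensed sketch shows that lookup, assuming access to a DbClient and the API factory exposed by the job context; the helper name resolveCinderApi and the factory type name are assumptions based on the accessors used above.
// Illustrative helper (hypothetical name): resolve the Cinder client for a storage system.
private CinderApi resolveCinderApi(DbClient dbClient, CinderApiFactory cinderApiFactory, URI storageSystemURI) {
    StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, storageSystemURI);
    // The endpoint information is derived from the storage system's active provider.
    CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
    // The factory may return null when no client is registered for the provider,
    // which poll() above reports as a transient error.
    return cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);
}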
Use of com.emc.storageos.cinder.api.CinderApi in project coprhd-controller by CoprHD.
The class CinderCloneOperations, method createSingleClone.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.CloneOperations#createSingleClone(
* com.emc.storageos.db.client.model.StorageSystem, java.net.URI, java.net.URI,
* java.lang.Boolean,
* com.emc.storageos.volumecontroller.TaskCompleter)
*/
@Override
public void createSingleClone(StorageSystem storageSystem, URI sourceObject, URI cloneVolume, Boolean createInactive, TaskCompleter taskCompleter) {
log.info("START createSingleClone operation");
boolean isVolumeClone = true;
try {
BlockObject sourceObj = BlockObject.fetch(dbClient, sourceObject);
URI tenantUri = null;
if (sourceObj instanceof BlockSnapshot) {
// In case of snapshot, get the tenant from its parent volume
NamedURI parentVolUri = ((BlockSnapshot) sourceObj).getParent();
Volume parentVolume = dbClient.queryObject(Volume.class, parentVolUri);
tenantUri = parentVolume.getTenant().getURI();
isVolumeClone = false;
} else {
// This is a default flow
tenantUri = ((Volume) sourceObj).getTenant().getURI();
isVolumeClone = true;
}
Volume cloneObj = dbClient.queryObject(Volume.class, cloneVolume);
StoragePool targetPool = dbClient.queryObject(StoragePool.class, cloneObj.getPool());
TenantOrg tenantOrg = dbClient.queryObject(TenantOrg.class, tenantUri);
// String cloneLabel = generateLabel(tenantOrg, cloneObj);
CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
log.info("Getting the cinder APi for the provider with id " + storageSystem.getActiveProviderURI());
CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);
String volumeId = "";
if (isVolumeClone) {
volumeId = cinderApi.cloneVolume(cloneObj.getLabel(), (cloneObj.getCapacity() / (1024 * 1024 * 1024)), targetPool.getNativeId(), sourceObj.getNativeId());
} else {
volumeId = cinderApi.createVolumeFromSnapshot(cloneObj.getLabel(), (cloneObj.getCapacity() / (1024 * 1024 * 1024)), targetPool.getNativeId(), sourceObj.getNativeId());
}
log.debug("Creating volume with the id " + volumeId + " on Openstack cinder node");
if (volumeId != null) {
// Cinder volume/snapshot clones are not in sync with the source, so
// set the replication state as DETACHED
cloneObj.setReplicaState(ReplicationState.DETACHED.name());
dbClient.persistObject(cloneObj);
Map<String, URI> volumeIds = new HashMap<String, URI>();
volumeIds.put(volumeId, cloneObj.getId());
ControllerServiceImpl.enqueueJob(new QueueJob(new CinderSingleVolumeCreateJob(
        volumeId, cloneObj.getLabel(), storageSystem.getId(),
        CinderConstants.ComponentType.volume.name(), ep, taskCompleter,
        targetPool.getId(), volumeIds)));
}
} catch (InternalException e) {
String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
log.error(errorMsg, e);
taskCompleter.error(dbClient, e);
} catch (Exception e) {
String errorMsg = String.format(CREATE_ERROR_MSG_FORMAT, sourceObject, cloneVolume);
log.error(errorMsg, e);
ServiceError serviceError = DeviceControllerErrors.cinder.operationFailed("createSingleClone", e.getMessage());
taskCompleter.error(dbClient, serviceError);
}
}
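Cinder expects volume sizes in GB while CoprHD stores capacity in bytes, so createSingleClone divides the capacity by 1024^3 before calling the API. A compact sketch of that branch, under the same assumptions as the method above; the toGb helper is illustrative.
// Illustrative conversion used when handing capacity to Cinder (bytes -> GB).
private long toGb(long capacityInBytes) {
    return capacityInBytes / (1024L * 1024L * 1024L);
}

// Clone from a volume, or create from a snapshot, mirroring the branch above.
String volumeId = isVolumeClone
        ? cinderApi.cloneVolume(cloneObj.getLabel(), toGb(cloneObj.getCapacity()),
                targetPool.getNativeId(), sourceObj.getNativeId())
        : cinderApi.createVolumeFromSnapshot(cloneObj.getLabel(), toGb(cloneObj.getCapacity()),
                targetPool.getNativeId(), sourceObj.getNativeId());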
Use of com.emc.storageos.cinder.api.CinderApi in project coprhd-controller by CoprHD.
The class AbstractCinderVolumeCreateJob, method updateStatus.
/**
* Called to update the job status when the volume create job completes.
* This is common update code for volume create operations.
*
* @param jobContext The job context.
*/
@Override
public void updateStatus(JobContext jobContext) throws Exception {
DbClient dbClient = jobContext.getDbClient();
try {
// Do nothing if the job is not completed yet
if (status == JobStatus.IN_PROGRESS) {
return;
}
String opId = getTaskCompleter().getOpId();
StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, status.name()));
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
// If the job reached a terminal state, update the storage pool capacity and remove the
// reservation for the volumes' capacity from the pool's reserved capacity map.
StoragePool storagePool = null;
if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
StringMap reservationMap = storagePool.getReservedCapacityMap();
for (URI volumeId : getTaskCompleter().getIds()) {
// remove from reservation map
reservationMap.remove(volumeId.toString());
}
dbClient.persistObject(storagePool);
}
if (status == JobStatus.SUCCESS) {
List<URI> volumes = new ArrayList<URI>();
Calendar now = Calendar.getInstance();
URI volumeId = getTaskCompleter().getId();
volumes.add(volumeId);
for (Map.Entry<String, URI> entry : volumeIds.entrySet()) {
VolumeShowResponse volumeDetails = cinderApi.showVolume(entry.getKey());
processVolume(entry.getValue(), volumeDetails, dbClient, now, logMsgBuilder);
// Adjust the storage pool's capacity
CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, volumeDetails.volume.size, false);
}
} else if (status == JobStatus.FAILED) {
for (URI id : getTaskCompleter().getIds()) {
logMsgBuilder.append("\n");
logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
Volume volume = dbClient.queryObject(Volume.class, id);
volume.setInactive(true);
dbClient.persistObject(volume);
}
}
logger.info(logMsgBuilder.toString());
} catch (Exception e) {
logger.error("Caught an exception while trying to updateStatus for CinderCreateVolumeJob", e);
setErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
} finally {
super.updateStatus(jobContext);
}
}
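When the create job reaches a terminal state, the reservation made for each volume is released from the pool's reserved capacity map before the pool is persisted. A minimal sketch of that step, assuming the same DbClient and task completer IDs as above; the helper name releaseReservedCapacity is illustrative.
// Illustrative helper (hypothetical name): release per-volume reservations once the job completes.
private void releaseReservedCapacity(DbClient dbClient, StoragePool storagePool, List<URI> volumeIds) {
    StringMap reservationMap = storagePool.getReservedCapacityMap();
    for (URI volumeId : volumeIds) {
        // Reservations are keyed by the volume URI string.
        reservationMap.remove(volumeId.toString());
    }
    dbClient.persistObject(storagePool);
}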
Use of com.emc.storageos.cinder.api.CinderApi in project coprhd-controller by CoprHD.
The class CinderVolumeExpandJob, method updateStatus.
@Override
public void updateStatus(JobContext jobContext) throws Exception {
DbClient dbClient = jobContext.getDbClient();
try {
// Do nothing if the job is not completed yet
if (status == JobStatus.IN_PROGRESS) {
return;
}
String opId = getTaskCompleter().getOpId();
_logger.info(String.format("Updating status of job %s to %s", opId, status.name()));
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
URI volumeId = getTaskCompleter().getId();
// If the job reached a terminal state, update the storage pool capacity and remove the
// reservation for the volume's capacity from the pool's reserved capacity map.
StoragePool storagePool = null;
if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
StringMap reservationMap = storagePool.getReservedCapacityMap();
// remove from reservation map
reservationMap.remove(volumeId.toString());
dbClient.persistObject(storagePool);
}
if (status == JobStatus.SUCCESS) {
VolumeExpandCompleter taskCompleter = (VolumeExpandCompleter) getTaskCompleter();
Volume volume = dbClient.queryObject(Volume.class, taskCompleter.getId());
long oldCapacity = volume.getCapacity();
long newCapacity = taskCompleter.getSize();
// set requested capacity
volume.setCapacity(newCapacity);
volume.setProvisionedCapacity(taskCompleter.getSize());
volume.setAllocatedCapacity(taskCompleter.getSize());
dbClient.persistObject(volume);
long increasedCapacity = newCapacity - oldCapacity;
CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, String.valueOf(increasedCapacity / CinderConstants.BYTES_TO_GB), false);
}
} catch (Exception e) {
_logger.error("Caught an exception while trying to updateStatus for CinderExpandVolumeJob", e);
setErrorStatus("Encountered an internal error during expand volume job status processing : " + e.getMessage());
} finally {
super.updateStatus(jobContext);
}
}
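On a successful expand, only the difference between the new and old capacity is pushed to the pool statistics, converted from bytes to GB with CinderConstants.BYTES_TO_GB. A minimal sketch under the same assumptions as the job above.
// Illustrative: compute the capacity added by the expand and update the pool accordingly.
long increasedCapacity = newCapacity - oldCapacity;  // both values are in bytes
String increasedCapacityInGb = String.valueOf(increasedCapacity / CinderConstants.BYTES_TO_GB);
// The final boolean mirrors the calls above; its exact meaning is not shown in this listing.
CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, increasedCapacityInGb, false);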