Usage of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Shown in class CinderUtils, method getCinderEndPoint.
/**
 * Builds the Cinder endpoint access details for the given storage provider.
 *
 * @param storageProviderURi URI of the persisted Cinder storage provider
 * @param dbClient database client used to load the provider record
 * @return endpoint info populated from the provider's persisted key map
 */
public static CinderEndPointInfo getCinderEndPoint(URI storageProviderURi, DbClient dbClient) {
    StorageProvider provider = dbClient.queryObject(StorageProvider.class, storageProviderURi);
    // Every connection detail was persisted as an entry in the provider's key map.
    StringMap keys = provider.getKeys();
    CinderEndPointInfo endPoint = new CinderEndPointInfo(
            keys.get(CinderConstants.KEY_CINDER_HOST_NAME),
            keys.get(CinderConstants.KEY_CINDER_REST_USER),
            keys.get(CinderConstants.KEY_CINDER_REST_PASSWORD),
            keys.get(CinderConstants.KEY_CINDER_TENANT_NAME));
    // Route the base URI to the http or https setter based on its scheme prefix.
    String restBaseUri = keys.get(CinderConstants.KEY_CINDER_REST_URI_BASE);
    if (restBaseUri.startsWith(CinderConstants.HTTP_URL)) {
        endPoint.setCinderBaseUriHttp(restBaseUri);
    } else {
        endPoint.setCinderBaseUriHttps(restBaseUri);
    }
    endPoint.setCinderToken(keys.get(CinderConstants.KEY_CINDER_REST_TOKEN));
    endPoint.setCinderTenantId(keys.get(CinderConstants.KEY_CINDER_TENANT_ID));
    return endPoint;
}
Usage of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Shown in class CinderExportOperations, method removeITLsFromVolume.
/**
 * Removes every ITL (Initiator-Target-LUN) entry from the volume's
 * extensions map; entries are identified by the ITL key prefix.
 *
 * @param volume the volume whose extensions map is pruned in place
 */
private void removeITLsFromVolume(Volume volume) {
    StringMap extensions = volume.getExtensions();
    // removeIf's default implementation delegates to the entry-set iterator's
    // remove(), so StringMap change tracking behaves exactly as the previous
    // explicit Iterator loop did.
    extensions.entrySet().removeIf(entry -> entry.getKey().startsWith(CinderConstants.PREFIX_ITL));
}
Usage of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Shown in class CinderExportOperations, method removeVolumes.
/**
 * Removes the given volumes from the export mask by detaching them from the
 * mask's user-added initiators. Completion (success or failure) is reported
 * through the supplied task completer; exceptions are not propagated.
 *
 * @param storage storage system hosting the export mask
 * @param exportMaskId URI of the export mask being modified
 * @param volumeURIs URIs of the volumes to remove
 * @param initiatorList impacted initiators (logged only; may be null)
 * @param taskCompleter completer used to signal success or failure
 * @throws DeviceControllerException declared for the interface; errors are
 *         actually routed to the task completer
 */
@Override
public void removeVolumes(StorageSystem storage, URI exportMaskId, List<URI> volumeURIs, List<Initiator> initiatorList, TaskCompleter taskCompleter) throws DeviceControllerException {
    log.info("{} removeVolumes START...", storage.getSerialNumber());
    try {
        log.info("removeVolumes: Export mask id: {}", exportMaskId);
        log.info("removeVolumes: volumes: {}", Joiner.on(',').join(volumeURIs));
        if (initiatorList != null) {
            log.info("removeVolumes: impacted initiators: {}", Joiner.on(",").join(initiatorList));
        }
        ExportMask exportMask = dbClient.queryObject(ExportMask.class, exportMaskId);
        // Load the volume objects that are to be detached.
        List<Volume> volumesToRemove = new ArrayList<Volume>();
        for (URI uri : volumeURIs) {
            volumesToRemove.add(dbClient.queryObject(Volume.class, uri));
        }
        // Detach against the initiators the user added to the mask, not the
        // (possibly partial) initiatorList argument.
        StringMap userAddedInitiators = exportMask.getUserAddedInitiators();
        List<Initiator> maskInitiators = new ArrayList<Initiator>();
        for (String initiatorUri : userAddedInitiators.values()) {
            maskInitiators.add(dbClient.queryObject(Initiator.class, URI.create(initiatorUri)));
        }
        detachVolumesFromInitiators(storage, volumesToRemove, maskInitiators);
        taskCompleter.ready(dbClient);
    } catch (final Exception ex) {
        log.error("Problem in RemoveVolumes: ", ex);
        ServiceError serviceError = DeviceControllerErrors.cinder.operationFailed("doRemoveVolumes", ex.getMessage());
        taskCompleter.error(dbClient, serviceError);
    }
    log.info("{} removeVolumes END...", storage.getSerialNumber());
}
Usage of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Shown in class AbstractCinderVolumeCreateJob, method updateStatus.
/**
 * Called to update the job status when the volume create job completes.
 * This is common update code for volume create operations.
 *
 * On any terminal state the volumes' capacity reservations are released from
 * the storage pool; on success each created volume is refreshed from Cinder,
 * on failure each volume is marked inactive.
 *
 * @param jobContext The job context.
 * @throws Exception only if the superclass status update fails; processing
 *         errors here are caught and recorded via setErrorStatus
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Do nothing if the job is not completed yet
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        StringBuilder logMsgBuilder = new StringBuilder(String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        // If terminal state update storage pool capacity and remove reservation for volumes capacity
        // from pool's reserved capacity map.
        StoragePool storagePool = null;
        if (status == JobStatus.SUCCESS || status == JobStatus.FAILED) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            StringMap reservationMap = storagePool.getReservedCapacityMap();
            for (URI volumeId : getTaskCompleter().getIds()) {
                // remove from reservation map
                reservationMap.remove(volumeId.toString());
            }
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            Calendar now = Calendar.getInstance();
            // NOTE(review): the original also built a single-element 'volumes'
            // list from getTaskCompleter().getId() here but never read it;
            // removed as dead code.
            for (Map.Entry<String, URI> entry : volumeIds.entrySet()) {
                VolumeShowResponse volumeDetails = cinderApi.showVolume(entry.getKey());
                processVolume(entry.getValue(), volumeDetails, dbClient, now, logMsgBuilder);
                // Adjust the storage pool's capacity
                CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, volumeDetails.volume.size, false);
            }
        } else if (status == JobStatus.FAILED) {
            for (URI id : getTaskCompleter().getIds()) {
                logMsgBuilder.append("\n");
                logMsgBuilder.append(String.format("Task %s failed to create volume: %s", opId, id.toString()));
                Volume volume = dbClient.queryObject(Volume.class, id);
                volume.setInactive(true);
                dbClient.persistObject(volume);
            }
        }
        logger.info(logMsgBuilder.toString());
    } catch (Exception e) {
        logger.error("Caught an exception while trying to updateStatus for CinderCreateVolumeJob", e);
        setErrorStatus("Encountered an internal error during volume create job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Usage of com.emc.storageos.db.client.model.StringMap in project coprhd-controller by CoprHD.
Shown in class CinderVolumeExpandJob, method updateStatus.
/**
 * Updates the job status when the volume expand job completes. On any
 * terminal state the volume's capacity reservation is released from the
 * storage pool; on success the volume's capacities are set to the requested
 * size and the pool capacity is adjusted by the delta.
 *
 * @param jobContext the job context supplying the DbClient and Cinder API
 * @throws Exception only if the superclass status update fails; processing
 *         errors here are caught and recorded via setErrorStatus
 */
@Override
public void updateStatus(JobContext jobContext) throws Exception {
    DbClient dbClient = jobContext.getDbClient();
    try {
        // Still running - nothing to record yet.
        if (status == JobStatus.IN_PROGRESS) {
            return;
        }
        String opId = getTaskCompleter().getOpId();
        _logger.info(String.format("Updating status of job %s to %s", opId, status.name()));
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, getStorageSystemURI());
        CinderApi cinderApi = jobContext.getCinderApiFactory().getApi(storageSystem.getActiveProviderURI(), getEndPointInfo());
        URI volumeId = getTaskCompleter().getId();
        // On a terminal state, release this volume's reservation from the
        // pool's reserved-capacity map and persist the pool.
        StoragePool storagePool = null;
        boolean terminal = (status == JobStatus.SUCCESS) || (status == JobStatus.FAILED);
        if (terminal) {
            storagePool = dbClient.queryObject(StoragePool.class, storagePoolUri);
            storagePool.getReservedCapacityMap().remove(volumeId.toString());
            dbClient.persistObject(storagePool);
        }
        if (status == JobStatus.SUCCESS) {
            VolumeExpandCompleter expandCompleter = (VolumeExpandCompleter) getTaskCompleter();
            Volume volume = dbClient.queryObject(Volume.class, expandCompleter.getId());
            long previousCapacity = volume.getCapacity();
            long requestedCapacity = expandCompleter.getSize();
            // Record the requested size as capacity, provisioned and allocated.
            volume.setCapacity(requestedCapacity);
            volume.setProvisionedCapacity(expandCompleter.getSize());
            volume.setAllocatedCapacity(expandCompleter.getSize());
            dbClient.persistObject(volume);
            long delta = requestedCapacity - previousCapacity;
            CinderUtils.updateStoragePoolCapacity(dbClient, cinderApi, storagePool, String.valueOf(delta / CinderConstants.BYTES_TO_GB), false);
        }
    } catch (Exception e) {
        _logger.error("Caught an exception while trying to updateStatus for CinderExpandVolumeJob", e);
        setErrorStatus("Encountered an internal error during expand volume job status processing : " + e.getMessage());
    } finally {
        super.updateStatus(jobContext);
    }
}
Aggregations