use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class ControllerUtils method getSystemPortsOfSystem.
/**
 * Queries the database for the storage ports of the given storage system.
 *
 * @param dbClient database client used for the query
 * @param systemURI URI of the storage system
 * @return list of the storage system's storage ports
 */
public static List<StoragePort> getSystemPortsOfSystem(final DbClient dbClient, final URI systemURI) {
    List<StoragePort> systemPorts = new ArrayList<StoragePort>();
    URIQueryResultList portQueryResult = new URIQueryResultList();
    try {
        dbClient.queryByConstraint(ContainmentConstraint.Factory.getStorageDeviceStoragePortConstraint(systemURI), portQueryResult);
        for (Iterator<URI> portResultItr = portQueryResult.iterator(); portResultItr.hasNext();) {
            StoragePort port = dbClient.queryObject(StoragePort.class, portResultItr.next());
            systemPorts.add(port);
        }
    } catch (DatabaseException e) {
        // If the port query fails, log the error and return the ports collected
        // so far instead of propagating the exception.
        s_logger.error("Unable to retrieve ports for system: {}", systemURI);
    }
    return systemPorts;
}
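Callers typically filter the returned list, since dbClient.queryObject may return null for URIs that no longer resolve and the list can include ports marked inactive. A minimal hedged usage sketch (the surrounding dbClient and systemURI variables, and the label-collection goal, are assumptions for illustration):

// Hypothetical caller: collect the labels of the system's usable ports.
List<StoragePort> ports = ControllerUtils.getSystemPortsOfSystem(dbClient, systemURI);
List<String> usablePortLabels = new ArrayList<String>();
for (StoragePort port : ports) {
    // Skip ports that could not be loaded or were marked inactive in the database.
    if (port == null || port.getInactive()) {
        continue;
    }
    usablePortLabels.add(port.getLabel());
}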
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class CinderStorageDevice method doCreateVolumes.
/*
* (non-Javadoc)
*
* @see com.emc.storageos.volumecontroller.BlockStorageDevice#doCreateVolumes
* (com.emc.storageos.db.client.model.StorageSystem,
* com.emc.storageos.db.client.model.StoragePool,
* java.lang.String, java.util.List,
* com.emc.storageos.volumecontroller.impl.utils.VirtualPoolCapabilityValuesWrapper,
* com.emc.storageos.volumecontroller.TaskCompleter)
*/
@Override
public void doCreateVolumes(StorageSystem storageSystem, StoragePool storagePool, String opId, List<Volume> volumes,
        VirtualPoolCapabilityValuesWrapper capabilities, TaskCompleter taskCompleter) throws DeviceControllerException {
    String label = null;
    Long capacity = null;
    boolean opCreationFailed = false;
    StringBuilder logMsgBuilder = new StringBuilder(String.format("Create Volume Start - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
        String tenantName = "";
        try {
            TenantOrg tenant = dbClient.queryObject(TenantOrg.class, volume.getTenant().getURI());
            tenantName = tenant.getLabel();
        } catch (DatabaseException e) {
            log.error("Error looking up TenantOrg object", e);
        }
        label = nameGenerator.generate(tenantName, volume.getLabel(), volume.getId().toString(),
                CinderConstants.CHAR_HYPHEN, SmisConstants.MAX_VOLUME_NAME_LENGTH);
        if (capacity == null) {
            capacity = volume.getCapacity();
        }
    }
    log.info(logMsgBuilder.toString());
    try {
        CinderEndPointInfo ep = CinderUtils.getCinderEndPoint(storageSystem.getActiveProviderURI(), dbClient);
        log.info("Getting the cinder API for the provider with id {}", storageSystem.getActiveProviderURI());
        CinderApi cinderApi = cinderApiFactory.getApi(storageSystem.getActiveProviderURI(), ep);
        String volumeId = null;
        Map<String, URI> volumeIds = new HashMap<String, URI>();
        if (volumes.size() == 1) {
            volumeId = cinderApi.createVolume(label, CinderUtils.convertToGB(capacity), storagePool.getNativeId());
            volumeIds.put(volumeId, volumes.get(0).getId());
            log.debug("Creating volume with the id {} on Openstack cinder node", volumeId);
        } else {
            log.debug("Starting to create {} volumes", volumes.size());
            for (int volumeIndex = 0; volumeIndex < volumes.size(); volumeIndex++) {
                volumeId = cinderApi.createVolume(label + CinderConstants.HYPHEN + (volumeIndex + 1),
                        CinderUtils.convertToGB(capacity), storagePool.getNativeId());
                volumeIds.put(volumeId, volumes.get(volumeIndex).getId());
                log.debug("Creating volume with the id {} on Openstack cinder node", volumeId);
            }
        }
        if (!volumeIds.isEmpty()) {
            CinderJob createVolumeJob = (volumes.size() > 1)
                    ? new CinderMultiVolumeCreateJob(volumeId, label, volumes.get(0).getStorageController(),
                            CinderConstants.ComponentType.volume.name(), ep, taskCompleter, storagePool.getId(), volumeIds)
                    : new CinderSingleVolumeCreateJob(volumeId, label, volumes.get(0).getStorageController(),
                            CinderConstants.ComponentType.volume.name(), ep, taskCompleter, storagePool.getId(), volumeIds);
            ControllerServiceImpl.enqueueJob(new QueueJob(createVolumeJob));
        }
    } catch (final InternalException e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        taskCompleter.error(dbClient, e);
    } catch (final Exception e) {
        log.error("Problem in doCreateVolumes: ", e);
        opCreationFailed = true;
        ServiceError serviceError = DeviceControllerErrors.cinder.operationFailed("doCreateVolumes", e.getMessage());
        taskCompleter.error(dbClient, serviceError);
    }
    if (opCreationFailed) {
        for (Volume vol : volumes) {
            vol.setInactive(true);
            dbClient.persistObject(vol);
        }
    }
    logMsgBuilder = new StringBuilder(String.format("Create Volumes End - Array:%s, Pool:%s",
            storageSystem.getSerialNumber(), storagePool.getNativeGuid()));
    for (Volume volume : volumes) {
        logMsgBuilder.append(String.format("%nVolume:%s", volume.getLabel()));
    }
    log.info(logMsgBuilder.toString());
}
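The byte capacity handed to cinderApi.createVolume is first passed through CinderUtils.convertToGB, because Cinder provisions volumes in whole gigabytes. A minimal sketch of that kind of conversion, assuming round-up semantics (an illustration, not the project's actual CinderUtils implementation):

// Hypothetical round-up conversion from a byte capacity to whole GB, since Cinder
// only accepts integral gigabyte sizes. The real logic lives in CinderUtils.convertToGB.
private static long bytesToGb(long capacityInBytes) {
    long oneGb = 1024L * 1024L * 1024L;
    // Round up so the created volume is never smaller than requested.
    return (capacityInBytes + oneGb - 1) / oneGb;
}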
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class DataDomainStatsRecorder method addUsageInfo.
/**
 * Adds capacity stats collected from a Data Domain mtree.
 *
 * @param statsCapInfo capacity statistics reported by Data Domain
 * @param keyMap map of collection context attributes
 * @param fsNativeGuid native GUID of the file share
 * @param ddClient Data Domain client
 * @return the stat
 */
public Stat addUsageInfo(DDStatsCapacityInfo statsCapInfo, Map<String, Object> keyMap, String fsNativeGuid, DataDomainClient ddClient) {
    Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, null);
    if (stat != null) {
        try {
            DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
            long measurementTimePeriodInSec = 0;
            DDStatsIntervalQuery granularity = (DDStatsIntervalQuery) keyMap.get(Constants._Granularity);
            switch (granularity) {
                case hour:
                    measurementTimePeriodInSec = HOUR_IN_SECONDS;
                    break;
                case day:
                    measurementTimePeriodInSec = DAY_IN_SECONDS;
                    break;
                case week:
                    measurementTimePeriodInSec = WEEK_IN_SECONDS;
                    break;
            }
            stat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
            // DD returns epochs (seconds) - convert to ms
            stat.setTimeInMillis(statsCapInfo.getCollectionEpoch() * 1000);
            long used = statsCapInfo.getLogicalCapacity().getUsed();
            long total = statsCapInfo.getLogicalCapacity().getTotal();
            // Convert data written from Bytes/sec to Bytes
            long preCompressionBytesWritten = 0;
            long postCompressionBytesWritten = 0;
            float compressionFactor = 1;
            if ((statsCapInfo != null) && (statsCapInfo.getDataWritten() != null)) {
                preCompressionBytesWritten = statsCapInfo.getDataWritten().getPreCompWritten();
                postCompressionBytesWritten = statsCapInfo.getDataWritten().getPostCompWritten();
                compressionFactor = statsCapInfo.getDataWritten().getCompressionFactor();
            }
            keyMap.put(Constants._FilePreCompressionBytesWritten, preCompressionBytesWritten);
            keyMap.put(Constants._FilePostCompressionBytesWritten, postCompressionBytesWritten);
            keyMap.put(Constants._CompressionRatio, compressionFactor);
            // Provisioned capacity is not available for mtrees
            stat.setAllocatedCapacity(used);
            stat.setBandwidthIn(preCompressionBytesWritten);
            statsColumnInjector.injectColumns(stat, dbClient);
            _log.debug(String.format("Stat: %s: %s: provisioned(): used(%s)", stat.getResourceId(), fsNativeGuid, used));
        } catch (DatabaseException ex) {
            _log.error("Query to db failed for FileShare id {}, skipping recording usage stat.", stat.getResourceId(), ex);
        }
    }
    return stat;
}
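The three values pushed into keyMap relate roughly as preCompressionBytesWritten ≈ compressionFactor × postCompressionBytesWritten. A hedged sketch of deriving the factor when the array does not report one (a hypothetical helper, not part of DataDomainStatsRecorder):

// Hypothetical fallback: derive a compression factor from the pre- and
// post-compression byte counts when the array does not report one.
private static float deriveCompressionFactor(long preCompBytes, long postCompBytes) {
    if (postCompBytes <= 0) {
        return 1.0f; // avoid division by zero; treat as "no compression"
    }
    return (float) preCompBytes / (float) postCompBytes;
}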
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class IsilonStatsRecorder method addUsageStat.
/**
 * Adds a Stat for usage from the IsilonSmartQuota.
 *
 * @param quota smart quota of the file share
 * @param keyMap map of collection context attributes
 * @param fsNativeGuid native GUID of the file share
 * @param isilonApi Isilon REST client
 * @return the stat
 */
public Stat addUsageStat(IsilonSmartQuota quota, Map<String, Object> keyMap, String fsNativeGuid, IsilonApi isilonApi) {
    Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, FileShare.class);
    if (stat != null) {
        try {
            DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
            stat.setTimeInMillis((Long) keyMap.get(Constants._TimeCollected));
            stat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
            statsColumnInjector.injectColumns(stat, dbClient);
            long provisionedCapacity = 0L;
            Thresholds threshold = quota.getThresholds();
            if (threshold != null && threshold.getHard() != null) {
                provisionedCapacity = threshold.getHard();
            }
            stat.setProvisionedCapacity(provisionedCapacity);
            long usedCapacity = quota.getUsagePhysical();
            stat.setAllocatedCapacity(usedCapacity);
            URIQueryResultList snapURIList = new URIQueryResultList();
            dbClient.queryByConstraint(ContainmentConstraint.Factory.getFileshareSnapshotConstraint(stat.getResourceId()), snapURIList);
            // Set snapshot count.
            // Set snapshot size. Get current data for snapshot size (snapshot size changes dynamically).
            int snapCount = 0;
            long fsSnapshotSize = 0;
            IsilonSnapshot isiSnap;
            for (URI snapURI : snapURIList) {
                Snapshot snap = dbClient.queryObject(Snapshot.class, snapURI);
                // Filter out deleted Snapshot
                if (snap != null && (!snap.getInactive())) {
                    String nativeId = snap.getNativeId();
                    try {
                        isiSnap = isilonApi.getSnapshot(nativeId);
                    } catch (IsilonException iex) {
                        _log.error(String.format("Stat: %s: can not get snapshot size for snapshot: %s", fsNativeGuid, nativeId), iex);
                        continue;
                    }
                    snapCount++;
                    fsSnapshotSize += Long.valueOf(isiSnap.getSize());
                }
            }
            stat.setSnapshotCount(snapCount);
            _log.debug(String.format("Stat: %s: snapshot count: %s", fsNativeGuid, snapCount));
            stat.setSnapshotCapacity(fsSnapshotSize);
            _log.debug(String.format("Stat: %s: snapshot size: %s", fsNativeGuid, fsSnapshotSize));
            _log.debug(String.format("Stat: %s: %s: provisioned capacity(%s): used capacity(%s)", stat.getResourceId(), fsNativeGuid, provisionedCapacity, usedCapacity));
        } catch (DatabaseException ex) {
            _log.error("Query to db failed for FileShare id {}, skipping recording usage stat.", stat.getResourceId(), ex);
        }
    }
    return stat;
}
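The two capacity figures recorded here, the hard quota threshold as provisioned capacity and the physical usage as allocated capacity, are the inputs a reporting layer would typically compare. A small hedged sketch of that comparison (a hypothetical helper, not part of IsilonStatsRecorder):

// Hypothetical helper: percentage of the hard quota currently consumed,
// guarding against file shares that have no hard threshold set.
private static double quotaUtilizationPercent(long provisionedCapacity, long usedCapacity) {
    if (provisionedCapacity <= 0) {
        return 0.0; // no hard quota configured, so utilization is undefined; report 0
    }
    return (usedCapacity * 100.0) / provisionedCapacity;
}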
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class RPDeviceController method setProtectionSetStatus.
private void setProtectionSetStatus(RecoverPointVolumeProtectionInfo volumeProtectionInfo, String protectionSetStatus, ProtectionSystem system) {
    // Only a protected source volume drives a protection set status change.
    if (volumeProtectionInfo.getRpVolumeCurrentProtectionStatus() == RecoverPointVolumeProtectionInfo.volumeProtectionStatus.PROTECTED_SOURCE) {
        URIQueryResultList list = new URIQueryResultList();
        Constraint constraint = ContainmentConstraint.Factory.getProtectionSystemProtectionSetConstraint(system.getId());
        try {
            _dbClient.queryByConstraint(constraint, list);
            Iterator<URI> it = list.iterator();
            while (it.hasNext()) {
                URI protectionSetId = it.next();
                _log.info("Check protection set ID: " + protectionSetId);
                ProtectionSet protectionSet = _dbClient.queryObject(ProtectionSet.class, protectionSetId);
                if (!protectionSet.getInactive()) {
                    _log.info("Change the status to: " + protectionSetStatus);
                    protectionSet.setProtectionStatus(protectionSetStatus);
                    _dbClient.updateObject(protectionSet);
                    // Only the first active protection set found is updated.
                    break;
                }
            }
        } catch (DatabaseException e) {
            // A failed query is non-fatal here; simply skip the protection set status update.
        }
    } else {
        _log.info("Did not pause the protection source. Not updating protection status");
    }
}
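Note that the loop above stops at the first active protection set it finds. For contrast, a hedged sketch of the same containment-constraint query applied to every active protection set of the system (a variation for illustration only, not the project's behavior):

// Hypothetical variation: update the status on every active protection set of the system,
// reusing the containment constraint and query pattern from setProtectionSetStatus.
URIQueryResultList allSets = new URIQueryResultList();
_dbClient.queryByConstraint(
        ContainmentConstraint.Factory.getProtectionSystemProtectionSetConstraint(system.getId()), allSets);
for (URI protectionSetId : allSets) {
    ProtectionSet protectionSet = _dbClient.queryObject(ProtectionSet.class, protectionSetId);
    if (protectionSet != null && !protectionSet.getInactive()) {
        protectionSet.setProtectionStatus(protectionSetStatus);
        _dbClient.updateObject(protectionSet);
    }
}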