use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
In class MetaVolumeTypeProcessor, the method processResult.
/**
 * Determines the composition type (concatenated vs. striped) of an EMC meta
 * volume from its EMC_Meta SMI-S instance and persists that type on either
 * the managed Volume (re-discovery context) or the UnManagedVolume
 * (unmanaged-volume discovery context) in the database.
 *
 * For VMAX striped meta volumes, also removes matched vpools that have
 * fastExpansion enabled from the unmanaged volume's supported-vpool list.
 *
 * All errors are logged and swallowed; this method never propagates.
 */
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
try {
DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
WBEMClient client = SMICommunicationInterface.getCIMClient(keyMap);
// Path of the meta volume captured from the earlier CIM call arguments.
CIMObjectPath metaVolumePath = getObjectPathfromCIMArgument(_args);
if (metaVolumePath == null) {
_logger.info(String.format("MetaVolumePath is null."));
} else {
_logger.info(String.format("Processing EMC_Meta for meta volume: %s", metaVolumePath));
UnManagedVolume preExistingVolume = null;
String isMetaVolume = "true";
String nativeGuid;
// Check if storage volume exists in db (the method is called from re-discovery context).
nativeGuid = getVolumeNativeGuid(metaVolumePath);
Volume storageVolume = checkStorageVolumeExistsInDB(nativeGuid, dbClient);
if (null == storageVolume || storageVolume.getInactive()) {
// Check if unmanaged volume exists in db (the method is called from unmanaged volumes discovery context).
nativeGuid = getUnManagedVolumeNativeGuidFromVolumePath(metaVolumePath);
_logger.debug("Meta volume nativeguid :" + nativeGuid);
preExistingVolume = checkUnManagedVolumeExistsInDB(nativeGuid, dbClient);
if (null == preExistingVolume) {
// Neither a managed nor an unmanaged volume is known for this path; nothing to update.
_logger.debug("Volume Info Object not found :" + nativeGuid);
return;
}
isMetaVolume = preExistingVolume.getVolumeCharacterstics().get(UnManagedVolume.SupportedVolumeCharacterstics.IS_METAVOLUME.toString());
} else {
_logger.debug("Volume managed by Bourne :" + storageVolume.getNativeGuid());
isMetaVolume = storageVolume.getIsComposite().toString();
}
// Guard: this processor only applies to meta (composite) volumes.
if (isMetaVolume.equalsIgnoreCase("false")) {
_logger.error(String.format("MetaVolumeTypeProcessor called for regular volume: %s", nativeGuid));
return;
}
final Iterator<?> it = (Iterator<?>) resultObj;
if (it.hasNext()) {
// Only the first EMC_Meta instance in the result is examined.
final CIMObjectPath symmMetaPath = (CIMObjectPath) it.next();
_logger.debug(String.format("Processing EMC_Meta: %s", symmMetaPath));
CIMInstance cimMeta = client.getInstance(symmMetaPath, false, false, STRIPE_EXTENTS_NUMBER);
CIMProperty stripeLengthProperty = cimMeta.getProperty(SmisConstants.CP_EXTENT_STRIPE_LENGTH);
Long stripeLength = Long.valueOf(stripeLengthProperty.getValue().toString());
String metaVolumeType;
// Stripe length 1 => concatenated; > 1 => striped; < 1 is invalid.
if (stripeLength < 1) {
_logger.error(String.format("Stripe length for EMC_Meta is less than 1: %s", stripeLength));
return;
} else if (stripeLength == 1) {
// this is concatenated meta volume
_logger.debug(String.format("Stripe length for EMC_Meta is : %s. Type is concatenated.", stripeLength));
metaVolumeType = Volume.CompositionType.CONCATENATED.toString();
} else {
// this is striped meta volume
_logger.debug(String.format("Stripe length for EMC_Meta is : %s. Type is striped.", stripeLength));
metaVolumeType = Volume.CompositionType.STRIPED.toString();
}
_logger.info(String.format("Meta volume: %s, type: %s", metaVolumePath, metaVolumeType));
if (null == preExistingVolume) {
// storage volume update
storageVolume.setCompositionType(metaVolumeType);
// persist volume in db
dbClient.persistObject(storageVolume);
} else {
// unmanaged volume update
StringSet metaVolumeTypeSet = new StringSet();
metaVolumeTypeSet.add(metaVolumeType);
preExistingVolume.putVolumeInfo(UnManagedVolume.SupportedVolumeInformation.META_VOLUME_TYPE.toString(), metaVolumeTypeSet);
// For VMAX striped meta volumes, vpools with fastExpansion cannot match
// for this volume.
if (Volume.CompositionType.STRIPED.toString().equalsIgnoreCase(metaVolumeType)) {
URI storageSystemUri = preExistingVolume.getStorageSystemUri();
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, storageSystemUri);
if (DiscoveredDataObject.Type.vmax.toString().equalsIgnoreCase(storageSystem.getSystemType())) {
_logger.info("Check matched vpool list for vmax striped meta volume and remove fastExpansion vpools.");
StringSet matchedVirtualPools = preExistingVolume.getSupportedVpoolUris();
if (matchedVirtualPools != null && !matchedVirtualPools.isEmpty()) {
_logger.debug("Matched Pools :" + Joiner.on("\t").join(matchedVirtualPools));
StringSet newMatchedPools = new StringSet();
boolean needToReplace = false;
for (String vPoolUriStr : matchedVirtualPools) {
URI vPoolUri = new URI(vPoolUriStr);
VirtualPool virtualPool = dbClient.queryObject(VirtualPool.class, vPoolUri);
// null check since supported vPool list in UnManagedVolume may contain inactive vPool
if (virtualPool != null && !virtualPool.getFastExpansion()) {
newMatchedPools.add(vPoolUriStr);
} else {
needToReplace = true;
}
}
if (needToReplace) {
// Replace the whole set only if at least one vpool was filtered out.
matchedVirtualPools.replace(newMatchedPools);
_logger.info("Replaced VPools : {}", Joiner.on("\t").join(preExistingVolume.getSupportedVpoolUris()));
}
}
}
}
// persist volume in db
dbClient.updateAndReindexObject(preExistingVolume);
}
}
}
} catch (Exception e) {
_logger.error("Processing meta volume type information failed :", e);
}
}
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
In class ExtendedCommunicationInterfaceImpl, the method injectStats.
/**
 * Injects collected Stats into Cassandra.
 *
 * Stats are persisted only for a FULL metering collection run; otherwise
 * they are skipped. When a partition manager is available, records are
 * inserted in batches of a configurable partition size; otherwise the whole
 * time series is inserted in one call.
 *
 * To-Do: verify how fast batch insertion works for entries in the 1000s;
 * if it is slow, split the batch into smaller batches.
 *
 * @throws BaseCollectionException declared by the collection framework
 */
protected void injectStats() throws BaseCollectionException {
    DbClient client = (DbClient) _keyMap.get(Constants.dbClient);
    @SuppressWarnings("unchecked") List<Stat> stats = (List<Stat>) _keyMap.get(Constants._Stats);
    @SuppressWarnings("unchecked") Map<String, String> props = (Map<String, String>) _keyMap.get(Constants.PROPS);
    String collectionType = props.get(Constants.METERING_COLLECTION_TYPE);
    // equalsIgnoreCase on the constant is null-safe, so no explicit
    // null check of collectionType is needed.
    if (Constants.METERING_COLLECTION_TYPE_FULL.equalsIgnoreCase(collectionType)) {
        _logger.info("Started Injection of Stats to Cassandra");
        // Batch size for partitioned inserts; overridable via properties.
        int size = Constants.DEFAULT_PARTITION_SIZE;
        String configuredSize = props.get(Constants.METERING_RECORDS_PARTITION_SIZE);
        if (null != configuredSize) {
            size = Integer.parseInt(configuredSize);
        }
        if (null == _partitionManager) {
            // No partition manager: insert the entire time series at once.
            Stat[] statBatch = stats.toArray(new Stat[stats.size()]);
            try {
                client.insertTimeSeries(StatTimeSeries.class, statBatch);
                _logger.info("{} Stat records persisted to DB", statBatch.length);
            } catch (DatabaseException e) {
                _logger.error("Error inserting records into the database", e);
            }
        } else {
            _partitionManager.insertInBatches(stats, size, client);
        }
    } else {
        _logger.info("Stat records not persisted to DB");
    }
}
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
In class CommitedSettingsInstanceProcessor, the method processResult.
/**
 * Records the committed pool-setting's InstanceID on the StoragePool that
 * corresponds to the pool capabilities associated with the modified setting.
 *
 * The tier methodology used when the setting was created is looked up from
 * the keyMap (keyed by setting path + "-" + TIERMETHODOLOGY) and decides
 * which setting-id field on the pool is updated.
 *
 * All errors are logged and swallowed; this method never propagates.
 */
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    try {
        CIMInstance modifiedInstance = getObjectPathfromCIMArgument();
        // Local variable: renamed from field-style "_dbClient" for clarity.
        DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
        AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
        CIMObjectPath poolSettingPath = modifiedInstance.getObjectPath();
        String poolSettingId = poolSettingPath.getKey(Constants.INSTANCEID).getValue().toString();
        // The keyMap maps the setting path to the pool-capabilities path it was created from.
        CIMObjectPath poolCapabilities_Associated_With_Setting =
                CimObjectPathCreator.createInstance(keyMap.get(poolSettingPath.toString()).toString());
        String poolID = getNativeIDFromInstance(
                poolCapabilities_Associated_With_Setting.getKey(Constants.INSTANCEID).getValue().toString());
        StorageSystem device = getStorageSystem(dbClient, profile.getSystemId());
        StoragePool pool = checkStoragePoolExistsInDB(poolID, dbClient, device);
        int tierMethodologyUsedForThisCreatedSetting = Integer.parseInt(
                (String) keyMap.get(poolSettingPath.toString() + Constants.HYPHEN + Constants.TIERMETHODOLOGY));
        if (null != pool) {
            updatePoolSettingId(tierMethodologyUsedForThisCreatedSetting, pool, poolSettingId);
            dbClient.persistObject(pool);
        }
    } catch (Exception e) {
        // Fixed typos in the original message ("Commiting Modified Settign Instance").
        _logger.error("Committing modified setting instance failed", e);
    }
}
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
In class NetAppStatsRecorder, the method addUsageStat.
/**
 * Adds a Stat for usage of a NetApp file share.
 *
 * @param fsNativeGuid native Guid of the file share
 * @param keyMap discovery context; supplies the db client and the
 *            collection timestamp (Constants._TimeCollected)
 * @param metrics named metric values: total size, used size, snapshot
 *            bytes reserved, and snapshot count
 * @return the populated stat, or null when no stat could be created
 *         for the file share
 */
public Stat addUsageStat(String fsNativeGuid, Map<String, Object> keyMap, Map<String, Number> metrics) {
Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, null);
if (stat != null) {
DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
// Both timestamps are set from the same collection time.
stat.setTimeInMillis((Long) keyMap.get(Constants._TimeCollected));
stat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
statsColumnInjector.injectColumns(stat, dbClient);
stat.setProvisionedCapacity((Long) metrics.get(Constants.SIZE_TOTAL));
stat.setAllocatedCapacity((Long) metrics.get(Constants.SIZE_USED));
stat.setSnapshotCapacity((Long) metrics.get(Constants.SNAPSHOT_BYTES_RESERVED));
stat.setSnapshotCount((Integer) metrics.get(Constants.SNAPSHOT_COUNT));
_log.debug(String.format("Stat: %s: %s: provisioned(%s): used(%s)", stat.getResourceId(), fsNativeGuid, stat.getProvisionedCapacity(), stat.getAllocatedCapacity()));
_log.debug(String.format("Stat: %s: %s: snapshot capacity (%s), count (%s)", stat.getResourceId(), fsNativeGuid, stat.getSnapshotCapacity(), stat.getSnapshotCount()));
}
return stat;
}
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
In class FEPortStatsProcessor, the method processResult.
/**
 * Parses front-end port (FEPort) statistics returned by the SMI-S provider,
 * creates port Stat objects for each matching storage port known to the
 * database, and then triggers port-metric recomputation for the system.
 *
 * @param operation the CIM operation that produced the result
 * @param resultObj CIMArgument array; element 0 holds the newline-separated
 *            per-port metric records
 * @param keyMap discovery context: db client, access profile, stat list,
 *            and the FEPort metric-name manifest
 * @throws SMIPluginException declared by the interface; this implementation
 *             logs and swallows all errors instead of propagating
 */
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws SMIPluginException {
    try {
        CIMArgument<?>[] outputArguments = (CIMArgument<?>[]) resultObj;
        DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
        AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
        List<Stat> metricsObjList = (List<Stat>) keyMap.get(Constants._Stats);
        List<String> metricSequence = (List<String>) keyMap.get(Constants.STORAGEOS_FEPORT_MANIFEST);
        // Each newline-separated record describes one FE port's metrics.
        String[] feportsMetricValues = ((String[]) outputArguments[0].getValue())[0].split("\n");
        List<StoragePort> systemPorts = ControllerUtils.getSystemPortsOfSystem(dbClient, profile.getSystemId());
        _logger.debug("FEPort metricNames Sequence {}", metricSequence);
        // process the results.
        if (null != metricSequence && !metricSequence.isEmpty()) {
            // Step 2: for each feport metric record.
            for (String fePortMetricValue : feportsMetricValues) {
                if (fePortMetricValue.isEmpty()) {
                    _logger.debug("Empty FEPort stats returned as part of Statistics Response");
                    continue;
                }
                String[] metrics = fePortMetricValue.split(Constants.SEMI_COLON);
                // Step 3: match the record against each port in db for this system;
                // metrics[0] is expected to end with the port name.
                for (StoragePort port : systemPorts) {
                    // Step 4: skip null entries.
                    if (null == port) {
                        continue;
                    } else if (!port.getInactive() && metrics[0].endsWith(port.getPortName())) {
                        // Step 5: provider-returned port exists in db -> create a PortStat for it.
                        _logger.debug("found FEPort in db for {}", port.getPortName());
                        createPortStatMetric(metricSequence, port, keyMap, metricsObjList, metrics);
                    }
                }
            }
            // Re-run the vpool matcher if any port allocation qualification changed.
            portMetricsProcessor.triggerVpoolMatcherIfPortAllocationQualificationChanged(profile.getSystemId(), systemPorts);
            // Compute and persist the storage system's average port metrics.
            portMetricsProcessor.computeStorageSystemAvgPortMetrics(profile.getSystemId());
            // Compute port group's port metrics (VMAX only).
            portMetricsProcessor.computePortGroupMetrics(profile.getSystemId());
        } else {
            _logger.error("failed processing FEPOrt Metric values as metric sequence is null.");
        }
    } catch (Exception e) {
        _logger.error("Failed while extracting stats for FEPorts: ", e);
    }
    // NOTE: the original ended with "resultObj = null;" — assigning to a
    // parameter has no effect on the caller, so the dead store was removed.
}
Aggregations