Use of com.emc.storageos.plugins.AccessProfile in project coprhd-controller by CoprHD.
The class XIVSupportedCopyTypesProcessor, method processResult.
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
try {
_dbClient = (DbClient) keyMap.get(Constants.dbClient);
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
Map<URI, StoragePool> poolsToMatchWithVpool = (Map<URI, StoragePool>) keyMap.get(Constants.MODIFIED_STORAGEPOOLS);
StorageSystem device = getStorageSystem(_dbClient, profile.getSystemId());
Iterator<CIMInstance> iterator = (Iterator<CIMInstance>) resultObj;
while (iterator.hasNext()) {
CIMInstance instance = iterator.next();
CIMObjectPath poolPath = getObjectPathfromCIMArgument(_args);
String instanceId = poolPath.getKeyValue(Constants.INSTANCEID).toString();
// instanceId is the pool's nativeId
StoragePool storagePool = checkStoragePoolExistsInDB(instanceId, _dbClient, device);
if (storagePool == null) {
_log.warn("No storage pool found in DB for pool instance {}", instanceId);
continue;
}
String thinProvisionedPreAllocateSupported = instance.getPropertyValue(Constants.THIN_PROVISIONED_CLIENT_SETTABLE_RESERVE).toString();
UnsignedInteger16[] copyTypes = (UnsignedInteger16[]) instance.getPropertyValue(Constants.SUPPORTED_COPY_TYPES);
addCopyTypesToStoragePool(copyTypes, storagePool, thinProvisionedPreAllocateSupported, poolsToMatchWithVpool);
}
} catch (Exception e) {
_log.error("Supported copy types processing failed: ", e);
}
}
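The processor pulls all of its collaborators out of the shared keyMap rather than receiving them directly. Below is a minimal sketch of how a caller might seed that map before invoking processResult; the key names reuse the Constants fields referenced above, while the populateKeyMap helper itself is hypothetical and assumes the same imports as the snippet above:

// Hypothetical helper: seeds the shared keyMap consumed by the processors on this page.
private Map<String, Object> populateKeyMap(DbClient dbClient, AccessProfile profile) {
    Map<String, Object> keyMap = new HashMap<String, Object>();
    // shared database client used by every processor in the discovery run
    keyMap.put(Constants.dbClient, dbClient);
    // carries the storage system URI exposed through profile.getSystemId()
    keyMap.put(Constants.ACCESSPROFILE, profile);
    // pools whose attributes changed and must be re-matched with virtual pools
    keyMap.put(Constants.MODIFIED_STORAGEPOOLS, new HashMap<URI, StoragePool>());
    return keyMap;
}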
Use of com.emc.storageos.plugins.AccessProfile in project coprhd-controller by CoprHD.
The class ZeroRecordGenerator, method identifyRecordstobeZeroed.
/**
* Say the cache holds 100 volumes and the current metering collection retrieves
* only 90 volumes from the providers: the 10 volumes that were not returned may
* have been deleted and therefore need to be zeroed. The logic below identifies
* those 10 missing volumes, and zero Stat records are later generated for them
* before being pushed to Cassandra.
*
* @param keyMap
* @param metricsObjList
* @param clazz
*/
public void identifyRecordstobeZeroed(Map<String, Object> keyMap, List<Stat> metricsObjList, final Class clazz) {
try {
@SuppressWarnings("unchecked") Set<String> resourceIds = (Set<String>) keyMap.get(Constants._nativeGUIDs);
DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
URI storageSystemURI = profile.getSystemId();
List<URI> volumeURIsInDB = extractVolumesOrFileSharesFromDB(storageSystemURI, dbClient, clazz);
Set<String> zeroedRecords = Sets.newHashSet();
Set<String> volumeURIsInDBSet = new HashSet<String>(Lists.transform(volumeURIsInDB, Functions.toStringFunction()));
// use Guava's Sets utility, which computes the difference without modifying the cached sets.
Sets.difference(volumeURIsInDBSet, resourceIds).copyInto(zeroedRecords);
if (!zeroedRecords.isEmpty()) {
_logger.info("Records Zeroed : {}", zeroedRecords.size());
// generate a zero Stat record for each missing volume
for (String record : zeroedRecords) {
Stat zeroStatRecord = injectattr(keyMap, record, clazz);
if (null != zeroStatRecord) {
generateZeroRecord(zeroStatRecord, keyMap);
metricsObjList.add(zeroStatRecord);
} else {
_logger.debug("Record to be zeroed doesn't have a VolumeUUID : {}", record);
}
}
}
} catch (Exception ex) {
// No need to throw Exception just because Zeroing Records failed,
// continue with persisting other records
_logger.error("Error in Zeroing records :", ex);
}
}
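The core of the method is a set difference between the volume URIs already persisted for the storage system and the native GUIDs returned by the provider in this collection cycle. A self-contained sketch of just that step, using the same Guava calls as above with made-up identifiers:

import com.google.common.collect.Sets;
import java.util.HashSet;
import java.util.Set;

public class SetDifferenceSketch {
    public static void main(String[] args) {
        // volume URIs currently persisted for the storage system (illustrative values)
        Set<String> volumeURIsInDB = new HashSet<String>();
        volumeURIsInDB.add("vol-1");
        volumeURIsInDB.add("vol-2");
        volumeURIsInDB.add("vol-3");
        // native GUIDs returned by the provider in this collection cycle
        Set<String> resourceIds = new HashSet<String>();
        resourceIds.add("vol-1");
        resourceIds.add("vol-2");
        // elements present in the DB view but missing from the provider response;
        // difference() returns a live view, so it is copied into a new set and
        // neither input set is modified
        Set<String> zeroedRecords = new HashSet<String>();
        Sets.difference(volumeURIsInDB, resourceIds).copyInto(zeroedRecords);
        System.out.println(zeroedRecords); // prints [vol-3]
    }
}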
Use of com.emc.storageos.plugins.AccessProfile in project coprhd-controller by CoprHD.
The class FEAdaptStatsProcessor, method processResult.
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
try {
CIMArgument<?>[] outputArguments = (CIMArgument<?>[]) resultObj;
DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
StorageSystem system = dbClient.queryObject(StorageSystem.class, profile.getSystemId());
List<String> metricSequence = (List<String>) keyMap.get(Constants.STORAGEOS_FEADAPT_MANIFEST);
String[] feadaptMetricValues = ((String[]) outputArguments[0].getValue())[0].split("\n");
Map<String, StorageHADomain> haDomains = getHADomainOfSystem(dbClient, profile.getSystemId());
if (null == metricSequence || metricSequence.isEmpty()) {
_logger.error("No metric sequence for FEAdaptStatsProcessor; no processing will happen");
return;
}
for (String metricValue : feadaptMetricValues) {
if (metricValue.isEmpty()) {
continue;
}
String[] metrics = metricValue.split(Constants.SEMI_COLON);
String instanceId = metrics[0];
String instanceName;
if (instanceId.contains(Constants.SMIS80_DELIMITER)) {
instanceName = instanceId.replaceAll(".*\\Q" + Constants.SMIS80_DELIMITER + "\\E", "");
} else {
instanceName = instanceId.replaceAll(".*\\+", "");
}
StorageHADomain haDomain = haDomains.get(instanceName);
if (haDomain == null) {
_logger.error("No StorageHADomain for instanceName: " + instanceName);
continue;
}
updateMetrics(metrics, metricSequence, haDomain, system, keyMap);
}
} catch (Exception e) {
_logger.error("Failed while extracting stats for FEAdapts: ", e);
}
}
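The haDomains lookup is keyed by the adapter name, which is whatever follows the last delimiter of the InstanceID: the SMIS80_DELIMITER for 8.0-style IDs, otherwise the last '+'. A standalone sketch of the two regex branches; the delimiter value "-+-" and the sample InstanceIDs are assumptions, since only the Constants.SMIS80_DELIMITER reference appears in the source:

public class InstanceNameSketch {
    // assumed delimiter; the real code reads it from Constants.SMIS80_DELIMITER
    private static final String SMIS80_DELIMITER = "-+-";

    static String toInstanceName(String instanceId) {
        if (instanceId.contains(SMIS80_DELIMITER)) {
            // \Q...\E quotes the delimiter so '+' is not treated as a regex operator
            return instanceId.replaceAll(".*\\Q" + SMIS80_DELIMITER + "\\E", "");
        }
        return instanceId.replaceAll(".*\\+", "");
    }

    public static void main(String[] args) {
        // illustrative InstanceIDs, not taken from a real array
        System.out.println(toInstanceName("SYMMETRIX-+-000194900123-+-FA-1D")); // FA-1D
        System.out.println(toInstanceName("SYMMETRIX+000194900123+FA-2E"));     // FA-2E
    }
}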
Use of com.emc.storageos.plugins.AccessProfile in project coprhd-controller by CoprHD.
The class StorageSystemStatsProcessor, method processResult.
/**
* System metrics sequence: string CSVSequence[] = InstanceID, ElementType,
* TotalIOs, KBytesTransferred, ReadIOs, ReadHitIOs, KBytesRead, WriteIOs,
* WriteHitIOs, KBytesWritten;
*/
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws SMIPluginException {
long timeInMillis;
try {
timeInMillis = (Long) keyMap.get(Constants._TimeCollected);
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
CIMArgument<?>[] outputArguments = (CIMArgument<?>[]) resultObj;
List<Stat> metricsObjList = (List<Stat>) keyMap.get(Constants._Stats);
String[] arrayStats = ((String[]) outputArguments[0].getValue())[0].split("\n");
List<String> metricSequence = (List<String>) keyMap.get(Constants.STORAGEOS_SYSTEM_MANIFEST);
_logger.debug("System metricNames Sequence {}", metricSequence);
for (String arrayStat : arrayStats) {
if (arrayStat.isEmpty()) {
_logger.debug("Empty arrayStat returned as part of Statistics Response");
continue;
}
Stat systemStat = new Stat();
Iterable<String> splitIterator = Splitter.on(Constants.SEMI_COLON).split(arrayStat);
List<String> systemMetricList = Lists.newLinkedList(splitIterator);
String nativeGuid = getSystemNativeGuidFromMetric(systemMetricList.get(0).toUpperCase(), keyMap);
systemStat.setNativeGuid(nativeGuid);
systemStat.setResourceId(profile.getSystemId());
systemStat.setServiceType(Constants._Block);
systemStat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
systemStat.setTimeInMillis(timeInMillis);
if (null != metricSequence && !metricSequence.isEmpty()) {
injectSystemStatMetrics(metricSequence, systemStat, systemMetricList);
} else {
_logger.error("Failed to process system metric values because the metric sequence is null or empty.");
}
metricsObjList.add(systemStat);
}
_logger.info("injected system statistics in DB.");
} catch (Exception e) {
_logger.error("Failed while extracting Stats for storage Systems: ", e);
}
resultObj = null;
}
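Each arrayStat row is a semicolon-separated record whose column order follows the CSVSequence listed in the Javadoc above. A minimal sketch of parsing one such row with the same Guava Splitter call; the row contents are fabricated and the literal ";" stands in for Constants.SEMI_COLON:

import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import java.util.List;

public class SystemStatRowSketch {
    public static void main(String[] args) {
        // fabricated row: InstanceID;ElementType;TotalIOs;KBytesTransferred;ReadIOs;...
        String arrayStat = "SYMMETRIX+000194900123;2;1500;20480;900;600;10240;600;300;10240";
        List<String> systemMetricList = Lists.newLinkedList(Splitter.on(";").split(arrayStat));
        // index 0 is the InstanceID used to build the system native GUID;
        // the remaining positions are read in the order given by the metric sequence
        System.out.println("InstanceID        : " + systemMetricList.get(0));
        System.out.println("TotalIOs          : " + systemMetricList.get(2));
        System.out.println("KBytesTransferred : " + systemMetricList.get(3));
    }
}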
Use of com.emc.storageos.plugins.AccessProfile in project coprhd-controller by CoprHD.
The class VNXStoragePortStatsProcessor, method processResult.
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
final PostMethod result = (PostMethod) resultObj;
_logger.info("Processing moversStats response: {}", resultObj);
try {
List<Stat> newstatsList = null;
Map<String, List<String>> interPortMap = null;
AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
List<Stat> statsList = (List<Stat>) keyMap.get(VNXFileConstants.STATS);
final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
/*
* step 1: get the interface map for each mover; its values are the storage ports,
* keyed as <MoverId, Map<interfaceIP, List<physicalPortName>>>
*/
Map<String, Map<String, List<String>>> moverInterMap = (Map<String, Map<String, List<String>>>) keyMap.get(VNXFileConstants.INTREFACE_PORT_MAP);
ResponsePacket responsePacket = (ResponsePacket) _unmarshaller.unmarshal(result.getResponseBodyAsStream());
List<Object> moversStats = getQueryStatsResponse(responsePacket);
Iterator<Object> iterator = moversStats.iterator();
// get the storagesystem from db
StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, profile.getSystemId());
// process Mover stats contains samples for each data mover and calculate port metrics
while (iterator.hasNext()) {
MoverNetStats moverNetStats = (MoverNetStats) iterator.next();
// process mover stats per data mover
String moverId = moverNetStats.getMover();
// get interfaces and their list ports for mover id
interPortMap = moverInterMap.get(moverId);
// get the sample data of mover or VDM
List<MoverNetStats.Sample> sampleList = moverNetStats.getSample();
Map<String, BigInteger> stringMapPortIOs = new HashMap<String, BigInteger>();
/*
* step 2: get the I/O traffic of the physical ports from the samples, as
* <physicalPortName, BigInteger(input + output bandwidth)>
*/
getPortIOTraffic(sampleList, stringMapPortIOs);
// stats sample time
long sampleTime = sampleList.get(0).getTime();
/* step 3: process the port metrics, update the StoragePort objects, and store them in the DB */
newstatsList = processPortStatsInfo(interPortMap, stringMapPortIOs, storageSystem, dbClient, sampleTime);
// finally add to stat object
statsList.addAll(newstatsList);
}
// calculate the avg port utilization for VDM and store in db
portMetricsProcessor.dataMoverAvgPortMetrics(profile.getSystemId());
} catch (final Exception ex) {
_logger.error("Exception occurred while processing the port stats response due to {}", ex.getMessage());
} finally {
result.releaseConnection();
}
}
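Steps 1 to 3 amount to joining two maps: the per-port I/O totals computed from the mover samples and the mover's interface-to-port map. A small sketch of that join over plain collections; the map shapes follow the comments above, while the interface IP, port names, and traffic numbers are invented for illustration:

import java.math.BigInteger;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PortTrafficSketch {
    public static void main(String[] args) {
        // step 2 result: <physicalPortName, input + output bytes> (fabricated numbers)
        Map<String, BigInteger> stringMapPortIOs = new HashMap<String, BigInteger>();
        stringMapPortIOs.put("cge-1-0", BigInteger.valueOf(120000L));
        stringMapPortIOs.put("cge-1-1", BigInteger.valueOf(80000L));
        // step 1 result for one mover: <interfaceIP, List<physicalPortName>>
        Map<String, List<String>> interPortMap = new HashMap<String, List<String>>();
        interPortMap.put("10.0.0.5", Arrays.asList("cge-1-0", "cge-1-1"));
        // step 3 in miniature: attribute the sampled traffic to every port behind
        // each interface so that a per-port stat could be built from it
        for (Map.Entry<String, List<String>> entry : interPortMap.entrySet()) {
            for (String portName : entry.getValue()) {
                BigInteger traffic = stringMapPortIOs.get(portName);
                System.out.println(entry.getKey() + " -> " + portName + " : "
                        + (traffic == null ? BigInteger.ZERO : traffic) + " bytes");
            }
        }
    }
}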