Usage of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD):
class FrontEndPortStatsProcessor, method processResult.
/**
 * Processes an SMI-S front-end port statistics query result, creating one
 * {@link Stat} per returned port instance and appending it to the shared
 * metrics list carried in {@code keyMap}.
 *
 * @param operation the CIM operation that produced the result (unused here)
 * @param resultObj expected to be an {@code Iterator<CIMInstance>} of port
 *            statistics instances
 * @param keyMap collection context; supplies the access profile, db client,
 *            collection timestamp and the shared Stats list
 * @throws BaseCollectionException declared by the interface; this
 *             implementation logs extraction failures and returns normally
 */
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    Iterator<CIMInstance> storagePortStatsResponseItr = (Iterator<CIMInstance>) resultObj;
    AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
    URI systemId = profile.getSystemId();
    DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
    logger.info("Processing FrontEnd Ports response");
    try {
        List<Stat> metricsObjList = (List<Stat>) keyMap.get(Constants._Stats);
        while (storagePortStatsResponseItr.hasNext()) {
            // Iterator is already typed CIMInstance; the former cast was redundant.
            CIMInstance storagePortStatInstance = storagePortStatsResponseItr.next();
            Stat fePortStat = new Stat();
            fePortStat.setServiceType(Constants._Block);
            fePortStat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
            // Prefer the provider-reported sample time; fall back to our own
            // collection timestamp when the provider time is unavailable (0).
            Long providerCollectionTime = convertCIMStatisticTime(getCIMPropertyValue(storagePortStatInstance, STATISTICTIME));
            if (0 != providerCollectionTime) {
                fePortStat.setTimeInMillis(providerCollectionTime);
            } else {
                fePortStat.setTimeInMillis((Long) keyMap.get(Constants._TimeCollected));
            }
            fePortStat.setTotalIOs(ControllerUtils.getModLongValue(getCIMPropertyValue(storagePortStatInstance, TOTALIOS)));
            fePortStat.setKbytesTransferred(ControllerUtils.getModLongValue(getCIMPropertyValue(storagePortStatInstance, KBYTESTRANSFERRED)));
            setPortRelatedInfo(storagePortStatInstance, systemId, dbClient, fePortStat);
            metricsObjList.add(fePortStat);
        }
    } catch (Exception ex) {
        // Best-effort collection: a failure here must not abort the wider
        // metering run, so log and continue.
        // (Removed the former finally { resultObj = null; } — reassigning a
        // method parameter has no effect on the caller and was dead code.)
        logger.error("Failed while extracting Stats for Front end ports: ", ex);
    }
    logger.info("Processing FrontEnd Ports response completed");
}
Usage of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD):
class DataDomainCommunicationInterface, method collectStatisticsInformation.
/**
 * Collects capacity statistics for every file system (mtree) known to ViPR on
 * the given Data Domain array, records per-file-system usage Stats, zeroes out
 * records for file systems deleted on the array, and persists the batch.
 *
 * @param accessProfile connection/collection profile for the target array
 * @throws BaseCollectionException on generic collection failure
 * @throws DataDomainApiException wrapping any error raised during collection
 */
@Override
public void collectStatisticsInformation(AccessProfile accessProfile) throws BaseCollectionException, DataDomainApiException {
    long statsCount = 0;
    URI storageSystemId = null;
    StorageSystem storageSystem = null;
    try {
        _log.info("Stats collection for {} using ip {}", accessProfile.getSystemId(), accessProfile.getIpAddress());
        storageSystemId = accessProfile.getSystemId();
        storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemId);
        initializeKeyMap(accessProfile);
        DataDomainClient ddClient = getDataDomainClient(accessProfile);
        URI providerId = storageSystem.getActiveProviderURI();
        StorageProvider provider = _dbClient.queryObject(StorageProvider.class, providerId);
        ZeroRecordGenerator zeroRecordGenerator = new FileZeroRecordGenerator();
        CassandraInsertion statsColumnInjector = new FileDBInsertion();
        DataDomainStatsRecorder recorder = new DataDomainStatsRecorder(zeroRecordGenerator, statsColumnInjector);
        // Stats collection start time; on the very first run fall back to the
        // time the storage system was successfully discovered.
        long statsCollectionStartTime = storageSystem.getLastMeteringRunTime();
        if (statsCollectionStartTime == 0) {
            statsCollectionStartTime = storageSystem.getSuccessDiscoveryTime();
        }
        // Stats collection end time
        long statsCollectionEndTime = accessProfile.getCurrentSampleTime();
        _keyMap.put(Constants._TimeCollected, statsCollectionEndTime);
        // Get list of file systems on the device that are in the DB
        List<URI> fsUris = zeroRecordGenerator.extractVolumesOrFileSharesFromDB(storageSystemId, _dbClient, FileShare.class);
        List<FileShare> fsObjs = _dbClient.queryObject(FileShare.class, fsUris, true);
        // Get capacity usage info on individual mtrees
        List<Stat> stats = new ArrayList<>();
        for (FileShare fileSystem : fsObjs) {
            String fsNativeId = fileSystem.getNativeId();
            String fsNativeGuid = fileSystem.getNativeGuid();
            // Retrieve the last 2 data points only
            int entriesRetrieved = 0;
            List<DDStatsCapacityInfo> statsCapInfos = new ArrayList<>();
            // Default granularity: hourly is the lowest resolution DD supports.
            DDStatsIntervalQuery granularity = DDStatsIntervalQuery.hour;
            try {
                DDMtreeCapacityInfos mtreeCapInfo = ddClient.getMTreeCapacityInfo(storageSystem.getNativeGuid(), fsNativeId, DataDomainApiConstants.STATS_FIRST_PAGE, DataDomainApiConstants.STATS_PAGE_SIZE, DDStatsDataViewQuery.absolute, DDStatsIntervalQuery.hour, true, DataDomainApiConstants.DESCENDING_SORT);
                entriesRetrieved += mtreeCapInfo.getPagingInfo().getPageEntries();
                // Collect stats
                List<DDStatsCapacityInfo> capacityInfos = mtreeCapInfo.getStatsCapacityInfo();
                if (capacityInfos != null) {
                    statsCapInfos.addAll(capacityInfos);
                }
                statsCount += entriesRetrieved;
            } catch (Exception e) {
                // Fixed: the original message had no {} placeholder, so the
                // GUID argument was silently dropped from the log output.
                _log.info("Stats collection info not found for fileNativeGuid {}", fsNativeGuid);
                continue;
            }
            // Retrieved all pages, now save in DB if info changed in the latest data point
            long usedCapacity = 0;
            if (fileSystem.getUsedCapacity() != null) {
                usedCapacity = fileSystem.getUsedCapacity();
            }
            DDStatsCapacityInfo statsCapInfo = null;
            Stat stat = null;
            if (statsCapInfos != null && !statsCapInfos.isEmpty()) {
                // Descending sort: entry 0 is the most recent data point.
                statsCapInfo = statsCapInfos.get(0);
                _keyMap.put(Constants._Granularity, granularity);
                stat = recorder.addUsageInfo(statsCapInfo, _keyMap, fsNativeGuid, ddClient);
            }
            // Persist FileShare capacity stats only if usage info has changed
            long allocatedCapacity = 0;
            if (stat != null) {
                allocatedCapacity = stat.getAllocatedCapacity();
            }
            // TODO: a method to detect changes in stats will be useful
            boolean statsChanged = usedCapacity != allocatedCapacity;
            if ((stat != null) && (!fileSystem.getInactive()) && (statsChanged)) {
                stats.add(stat);
                fileSystem.setUsedCapacity(allocatedCapacity);
                fileSystem.setCapacity(stat.getProvisionedCapacity());
                _dbClient.persistObject(fileSystem);
            }
        }
        // Determine if any filesystems were deleted from this device and write zero records for deleted ones
        zeroRecordGenerator.identifyRecordstobeZeroed(_keyMap, stats, FileShare.class);
        persistStatsInDB(stats);
        // TODO: Metering task completer will overwrite currTime below with a new
        // time as the last collection time. To avoid this, setLastTime in
        // MeteringTaskCompleter should be modified to set last metering run time
        // only if it
        storageSystem.setLastMeteringRunTime(statsCollectionEndTime);
        _log.info("Done metering device {}, processed {} file system stats ", storageSystemId, statsCount);
        _log.info("End collecting statistics for ip address {}", accessProfile.getIpAddress());
    } catch (Exception e) {
        _log.error("CollectStatisticsInformation failed. Storage system: " + storageSystemId, e);
        throw DataDomainApiException.exceptions.statsCollectionFailed(e.getMessage());
    }
}
Usage of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD):
class ExtendedCommunicationInterfaceImpl, method injectStats.
/**
 * Persists the collected {@link Stat} records to Cassandra, but only when the
 * metering run is a full collection. Records are written through the partition
 * manager in batches when one is configured; otherwise the whole list is
 * inserted in a single time-series write.
 *
 * To-Do: verify how fast batch insertion is for entries in the 1000s; if it is
 * slow, split the batch into smaller batches.
 *
 * @throws BaseCollectionException declared by the collection framework
 */
protected void injectStats() throws BaseCollectionException {
    DbClient client = (DbClient) _keyMap.get(Constants.dbClient);
    @SuppressWarnings("unchecked") List<Stat> stats = (List<Stat>) _keyMap.get(Constants._Stats);
    @SuppressWarnings("unchecked") Map<String, String> props = (Map<String, String>) _keyMap.get(Constants.PROPS);
    String collectionType = props.get(Constants.METERING_COLLECTION_TYPE);
    // Guard clause: anything other than a full collection is not persisted.
    if (collectionType == null || !Constants.METERING_COLLECTION_TYPE_FULL.equalsIgnoreCase(collectionType)) {
        _logger.info("Stat records not persisted to DB");
        return;
    }
    _logger.info("Started Injection of Stats to Cassandra");
    // Batch size: configured override, else the framework default.
    String configuredSize = props.get(Constants.METERING_RECORDS_PARTITION_SIZE);
    int batchSize = (configuredSize != null) ? Integer.parseInt(configuredSize) : Constants.DEFAULT_PARTITION_SIZE;
    if (_partitionManager != null) {
        _partitionManager.insertInBatches(stats, batchSize, client);
    } else {
        // No partition manager configured: single bulk time-series insert.
        Stat[] records = stats.toArray(new Stat[stats.size()]);
        try {
            client.insertTimeSeries(StatTimeSeries.class, records);
            _logger.info("{} Stat records persisted to DB", records.length);
        } catch (DatabaseException e) {
            _logger.error("Error inserting records into the database", e);
        }
    }
}
Usage of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD):
class NetAppStatsRecorder, method addUsageStat.
/**
 * Adds a Stat for usage of the given file share.
 *
 * @param fsNativeGuid native GUID of the file share
 * @param keyMap collection context holding the db client and collection time
 * @param metrics capacity values keyed by Constants.SIZE_TOTAL, SIZE_USED,
 *            SNAPSHOT_BYTES_RESERVED and SNAPSHOT_COUNT
 * @return the populated stat, or null when none could be created
 */
public Stat addUsageStat(String fsNativeGuid, Map<String, Object> keyMap, Map<String, Number> metrics) {
    Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, null);
    if (stat == null) {
        return null;
    }
    DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
    // Same collection timestamp is used for both time fields.
    Long collectedAt = (Long) keyMap.get(Constants._TimeCollected);
    stat.setTimeInMillis(collectedAt);
    stat.setTimeCollected(collectedAt);
    statsColumnInjector.injectColumns(stat, dbClient);
    stat.setProvisionedCapacity((Long) metrics.get(Constants.SIZE_TOTAL));
    stat.setAllocatedCapacity((Long) metrics.get(Constants.SIZE_USED));
    stat.setSnapshotCapacity((Long) metrics.get(Constants.SNAPSHOT_BYTES_RESERVED));
    stat.setSnapshotCount((Integer) metrics.get(Constants.SNAPSHOT_COUNT));
    _log.debug(String.format("Stat: %s: %s: provisioned(%s): used(%s)", stat.getResourceId(), fsNativeGuid, stat.getProvisionedCapacity(), stat.getAllocatedCapacity()));
    _log.debug(String.format("Stat: %s: %s: snapshot capacity (%s), count (%s)", stat.getResourceId(), fsNativeGuid, stat.getSnapshotCapacity(), stat.getSnapshotCount()));
    return stat;
}
Usage of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD):
class CapacityPoolProcessor, method processVolumeCapacity.
/**
 * Process volume capacity: iterates over the given chunk and processes
 * each volume's capacity, accumulating results into {@code keyMap}.
 *
 * For each volume a key is derived (8.x providers use a different key scheme
 * and the EMC-specific space-consumed property). If no entry exists yet for
 * the key, the raw consumed-space value is stored; if an existing entry is a
 * {@link Stat}, its provisioned and allocated capacities are filled in.
 *
 * @param volumeInstances {@link CloseableIterator} instance over volume CIM instances
 * @param keyMap {@link Map} instance used both for flags and as the metrics accumulator
 */
private void processVolumeCapacity(CloseableIterator<CIMInstance> volumeInstances, Map<String, Object> keyMap) {
    while (volumeInstances.hasNext()) {
        try {
            // Iterator is already typed CIMInstance; the former cast was redundant.
            final CIMInstance volumeInstance = volumeInstances.next();
            String key = null;
            String spaceConsumed = null;
            if (keyMap.containsKey(Constants.IS_NEW_SMIS_PROVIDER) && Boolean.valueOf(keyMap.get(Constants.IS_NEW_SMIS_PROVIDER).toString())) {
                key = createKeyfor8x(volumeInstance);
                spaceConsumed = volumeInstance.getProperty(_emcspaceConsumed).getValue().toString();
            } else {
                key = createKeyfromProps(volumeInstance);
                spaceConsumed = volumeInstance.getProperty(_spaceConsumed).getValue().toString();
            }
            Object value = getMetrics(keyMap, key);
            if (null == value) {
                keyMap.put(key, Long.parseLong(spaceConsumed));
            } else if (value instanceof Stat) {
                Stat metrics = (Stat) value;
                metrics.setProvisionedCapacity(returnProvisionedCapacity(volumeInstance, keyMap));
                metrics.setAllocatedCapacity(Long.parseLong(spaceConsumed));
            }
        } catch (Exception ex) {
            // Per-volume failures are skipped so the rest of the chunk is still
            // processed. This check will make sure to skip unnecessary logs.
            if (!(ex instanceof BaseCollectionException)) {
                _logger.error("Provisioned Capacity failure : ", ex);
            }
        }
    }
}
Aggregations