Use of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD).
Class ZeroRecordGenerator, method identifyRecordstobeZeroed:
/**
 * Detects resources that exist in the DB but were absent from the current
 * metering collection, and queues zero-valued Stat records for them.
 *
 * Example: the cache holds 100 volumes but the current collection returned
 * only 90 from the providers; the 10 missing volumes are identified here and
 * a zero Stat record is generated for each before the batch is pushed to
 * Cassandra.
 *
 * @param keyMap         shared context map (dbClient, access profile, set of
 *                       nativeGUIDs seen in this collection)
 * @param metricsObjList list the generated zero Stat records are appended to
 * @param clazz          resource type (e.g. Volume or FileShare) to query from the DB
 */
public void identifyRecordstobeZeroed(Map<String, Object> keyMap, List<Stat> metricsObjList, final Class clazz) {
    try {
        @SuppressWarnings("unchecked")
        Set<String> collectedGuids = (Set<String>) keyMap.get(Constants._nativeGUIDs);
        DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
        AccessProfile accessProfile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
        List<URI> dbResourceURIs = extractVolumesOrFileSharesFromDB(accessProfile.getSystemId(), dbClient, clazz);
        Set<String> dbResourceIds = new HashSet<String>(Lists.transform(dbResourceURIs, Functions.toStringFunction()));
        // Guava's Sets.difference yields (DB - collected) as a view without
        // mutating either input set; copy it out so we can iterate freely.
        Set<String> missingResources = Sets.newHashSet();
        Sets.difference(dbResourceIds, collectedGuids).copyInto(missingResources);
        if (missingResources.isEmpty()) {
            return;
        }
        _logger.info("Records Zeroed : {}", missingResources.size());
        for (String resourceId : missingResources) {
            Stat zeroStat = injectattr(keyMap, resourceId, clazz);
            if (zeroStat == null) {
                _logger.debug("Records need to get Zeroed doesn't have VolumeUUID : {}", resourceId);
            } else {
                generateZeroRecord(zeroStat, keyMap);
                metricsObjList.add(zeroStat);
            }
        }
    } catch (Exception ex) {
        // Zeroing is best-effort: a failure here must not stop the
        // remaining (real) metering records from being persisted.
        _logger.error("Error in Zeroing records :", ex);
    }
}
Use of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD).
Class ZeroRecordGenerator, method injectattr:
/**
 * Resolves the resource behind {@code nativeGuid} and returns its Stat object
 * (created, or reused from the keyMap cache), with resource URI, native GUID
 * and — when {@code clazz} is supplied — project, tenant and vPool URIs
 * injected, before the record is pushed to Cassandra.
 *
 * To-Do: Inspect on using any external Cache Mechanism like EhCache /
 * Memcache, if needed. Plugins implement {@code injectResourceURI} to map a
 * native GUID to resource IDs.
 *
 * @param keyMap     shared context map (dbClient, collected nativeGUIDs, cached Stats)
 * @param nativeGuid native GUID of the resource; for zero records this is the
 *                   resource's URI string instead (see comment in the body)
 * @param clazz      concrete resource class (Volume/FileShare) when generating
 *                   zero records; null during normal collection
 * @return the Stat for the resource, or null when the resource could not be
 *         resolved, no Stat could be built, or a snapshot's parent volume was
 *         processed instead (the parent is already in the list)
 */
public <T extends DataObject> Stat injectattr(Map<String, Object> keyMap, String nativeGuid, Class<T> clazz) {
    Stat statObj = null;
    URI volURI = null;
    URI projectURI = null;
    URI tenantURI = null;
    URI vPoolURI = null;
    T volObj = null;
    boolean snapProcessed = false;
    try {
        DbClient client = (DbClient) keyMap.get(Constants.dbClient);
        /**
         * Number of records to be Zeroed, generally will not be a huge
         * number compared to the Volumes getting processed. Hence, the
         * number of calls would be minimum. (Or) the other approach would
         * be to Cache NativeGuids.
         *
         * the Argument nativeGuid would be the Resource ID URI in case of
         * Zero Records.
         */
        if (null != clazz) {
            // Zero-record path: nativeGuid actually carries the resource URI.
            volURI = new URI(nativeGuid);
            volObj = client.queryObject(clazz, volURI);
            if (volObj instanceof Volume) {
                Volume volume = (Volume) volObj;
                // Replace the URI string with the volume's real native GUID.
                nativeGuid = volume.getNativeGuid();
                projectURI = volume.getProject().getURI();
                tenantURI = volume.getTenant().getURI();
                vPoolURI = volume.getVirtualPool();
            } else {
                // NOTE(review): assumes any non-Volume here is a FileShare;
                // a different DataObject subtype would throw ClassCastException.
                FileShare fileShare = (FileShare) volObj;
                nativeGuid = fileShare.getNativeGuid();
                projectURI = fileShare.getProject().getURI();
                tenantURI = fileShare.getTenant().getURI();
                vPoolURI = fileShare.getVirtualPool();
            }
        } else {
            // Normal collection path: map the native GUID to resource URIs
            // via the plugin-specific lookup.
            List<URI> volumeURIs = injectResourceURI(client, nativeGuid);
            if (null == volumeURIs || volumeURIs.isEmpty()) {
                _logger.debug("Querying Cassandra using nativeGUID:" + nativeGuid + "yields : 0 ResourceID");
                return statObj;
            }
            volURI = volumeURIs.get(0);
        }
        long allocatedCapacity = 0L;
        // if snap,process the parent volume
        if (!URIUtil.isType(volURI, Volume.class) && !URIUtil.isType(volURI, FileShare.class)) {
            _logger.debug("Skipping Statistics for Snapshots :" + volURI);
            BlockObject bo = BlockObject.fetch(client, volURI);
            if (bo instanceof BlockSnapshot) {
                // Redirect to the snapshot's parent volume; the snapshot's
                // capacity is rolled up into the parent's Stat below.
                Volume parent = client.queryObject(Volume.class, ((BlockSnapshot) (bo)).getParent().getURI());
                _logger.info("Processing snapshot's parent Volume {}", parent.getNativeGuid());
                volURI = parent.getId();
                nativeGuid = parent.getNativeGuid();
                allocatedCapacity = ((BlockSnapshot) bo).getAllocatedCapacity();
                snapProcessed = true;
            }
        }
        // No need to verify whether Volume is inactive or not, as for
        // zeroing records
        // even inactive Volumes need to get zeroed.
        // for verification purpose
        _logger.debug("Querying Cassandra using nativeGUID:" + nativeGuid + "yields Resource ID :" + volURI);
        if (keyMap.containsKey(nativeGuid)) {
            // Reuse the cached Stat so snapshot roll-ups accumulate on the
            // same object the parent volume already produced.
            statObj = (Stat) keyMap.get(nativeGuid);
        } else {
            // create a Metrics Object
            statObj = getStatObject(volURI, client);
            if (null == statObj) {
                return statObj;
            }
            keyMap.put(nativeGuid, statObj);
            statObj.setResourceId(volURI);
        }
        statObj.setNativeGuid(nativeGuid);
        // Project/tenant/vPool were only fetched on the zero-record path
        // (clazz != null) — an additional DB call to get the Volume/File object.
        if (clazz != null) {
            statObj.setProject(projectURI);
            statObj.setTenant(tenantURI);
            statObj.setVirtualPool(vPoolURI);
        }
        if (snapProcessed) {
            _logger.info("Adding SnapShot details");
            // which consume metering data.
            if (null == statObj.getSnapshotCount()) {
                statObj.setSnapshotCount(0);
                statObj.setSnapshotCapacity(0);
            }
            statObj.setSnapshotCount(statObj.getSnapshotCount() + 1);
            statObj.setSnapshotCapacity(statObj.getSnapshotCapacity() + allocatedCapacity);
        }
        // Add Volume URIs to local Collection, which will be compared
        // against Volumes in DB to determine Zero Records.
        @SuppressWarnings("unchecked") Set<String> volumeURIList = (Set<String>) keyMap.get(Constants._nativeGUIDs);
        volumeURIList.add(volURI.toString());
    } catch (Exception e) {
        // continue processing other volumes
        if (null != nativeGuid) {
            _logger.error("Cassandra Database Error while querying VolumeUUId, VirtualPool & Project URIs : {}-->", nativeGuid, e);
        }
    }
    // if processing snap, parent volume would have already added to the list, hence return null to skip
    return snapProcessed ? null : statObj;
}
Use of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD).
Class DataDomainStatsRecorder, method addUsageInfo:
/**
 * Adds capacity/usage info from a Data Domain mtree capacity sample to the
 * Stat record for the given file share.
 *
 * @param statsCapInfo capacity sample from the DD REST API; must not be null
 *                     (it is dereferenced unconditionally)
 * @param keyMap       shared context map (dbClient, collection time, granularity)
 * @param fsNativeGuid native Guid of the file share
 * @param ddClient     Data Domain REST client (not used in this method)
 * @return the populated stat, or null if no Stat could be resolved for the file share
 */
public Stat addUsageInfo(DDStatsCapacityInfo statsCapInfo, Map<String, Object> keyMap, String fsNativeGuid, DataDomainClient ddClient) {
    Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, null);
    if (stat != null) {
        try {
            DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
            // NOTE(review): measurementTimePeriodInSec is computed but never
            // read in this method — confirm whether it was meant to be stored
            // on the stat; kept for now to avoid dropping intended logic.
            long measurementTimePeriodInSec = 0;
            DDStatsIntervalQuery granularity = (DDStatsIntervalQuery) keyMap.get(Constants._Granularity);
            switch (granularity) {
                case hour:
                    measurementTimePeriodInSec = HOUR_IN_SECONDS;
                    break;
                case day:
                    measurementTimePeriodInSec = DAY_IN_SECONDS;
                    break;
                case week:
                    measurementTimePeriodInSec = WEEK_IN_SECONDS;
                    break;
            }
            stat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
            // DD returns epochs (seconds) - convert to ms
            stat.setTimeInMillis(statsCapInfo.getCollectionEpoch() * 1000);
            long used = statsCapInfo.getLogicalCapacity().getUsed();
            // Convert data written from Bytes/sec to Bytes
            long preCompressionBytesWritten = 0;
            long postCompressionBytesWritten = 0;
            float compressionFactor = 1;
            // statsCapInfo was already dereferenced above, so the old
            // "statsCapInfo != null" guard here was dead; only the
            // dataWritten sub-object can legitimately be null at this point.
            if (statsCapInfo.getDataWritten() != null) {
                preCompressionBytesWritten = statsCapInfo.getDataWritten().getPreCompWritten();
                postCompressionBytesWritten = statsCapInfo.getDataWritten().getPostCompWritten();
                compressionFactor = statsCapInfo.getDataWritten().getCompressionFactor();
            }
            keyMap.put(Constants._FilePreCompressionBytesWritten, preCompressionBytesWritten);
            keyMap.put(Constants._FilePostCompressionBytesWritten, postCompressionBytesWritten);
            keyMap.put(Constants._CompressionRatio, compressionFactor);
            // Provisioned capacity is not available for mtrees
            stat.setAllocatedCapacity(used);
            stat.setBandwidthIn(preCompressionBytesWritten);
            statsColumnInjector.injectColumns(stat, dbClient);
            _log.debug(String.format("Stat: %s: %s: provisioned(): used(%s)", stat.getResourceId(), fsNativeGuid, used));
        } catch (DatabaseException ex) {
            _log.error("Query to db failed for FileShare id {}, skipping recording usage stat.", stat.getResourceId(), ex);
        }
    }
    return stat;
}
Use of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD).
Class IsilonStatsRecorder, method addUsageStat:
/**
 * Adds a Stat for usage from the IsilonQuota.
 *
 * @param quota        smart quota holding usage and threshold data for the file share
 * @param keyMap       shared context map (dbClient, collection time, collected GUIDs)
 * @param fsNativeGuid native Guid of the file share
 * @param isilonApi    Isilon REST client, used to fetch each snapshot's live size
 * @return the populated stat, or null if no FileShare could be resolved for the GUID
 */
public Stat addUsageStat(IsilonSmartQuota quota, Map<String, Object> keyMap, String fsNativeGuid, IsilonApi isilonApi) {
    Stat stat = zeroRecordGenerator.injectattr(keyMap, fsNativeGuid, FileShare.class);
    if (stat != null) {
        try {
            DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
            stat.setTimeInMillis((Long) keyMap.get(Constants._TimeCollected));
            stat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
            statsColumnInjector.injectColumns(stat, dbClient);
            // Provisioned capacity comes from the quota's hard threshold; 0 when unset.
            long provisionedCapacity = 0L;
            Thresholds threshold = quota.getThresholds();
            if (threshold != null && threshold.getHard() != null) {
                provisionedCapacity = threshold.getHard();
            }
            stat.setProvisionedCapacity(provisionedCapacity);
            long usedCapacity = quota.getUsagePhysical();
            stat.setAllocatedCapacity(usedCapacity);
            URIQueryResultList snapURIList = new URIQueryResultList();
            dbClient.queryByConstraint(ContainmentConstraint.Factory.getFileshareSnapshotConstraint(stat.getResourceId()), snapURIList);
            // Set snapshot count.
            // Set snapshot size. Get current data for snapshot size (snapshot
            // size changes dynamically), so re-read each size from the array
            // instead of trusting DB data.
            int snapCount = 0;
            long fsSnapshotSize = 0;
            for (URI snapURI : snapURIList) {
                Snapshot snap = dbClient.queryObject(Snapshot.class, snapURI);
                // Filter out deleted Snapshot
                if (snap != null && (!snap.getInactive())) {
                    String nativeId = snap.getNativeId();
                    IsilonSnapshot isiSnap;
                    try {
                        isiSnap = isilonApi.getSnapshot(nativeId);
                    } catch (IsilonException iex) {
                        // Best-effort: skip this snapshot's size but keep counting others.
                        _log.error(String.format("Stat: %s: can not get snapshot size for snapshot: %s", fsNativeGuid, nativeId), iex);
                        continue;
                    }
                    snapCount++;
                    // parseLong avoids the needless Long boxing of Long.valueOf here.
                    fsSnapshotSize += Long.parseLong(isiSnap.getSize());
                }
            }
            stat.setSnapshotCount(snapCount);
            _log.debug(String.format("Stat: %s: snapshot count: %s", fsNativeGuid, snapCount));
            stat.setSnapshotCapacity(fsSnapshotSize);
            _log.debug(String.format("Stat: %s: snapshot size: %s", fsNativeGuid, fsSnapshotSize));
            _log.debug(String.format("Stat: %s: %s: provisioned capacity(%s): used capacity(%s)", stat.getResourceId(), fsNativeGuid, provisionedCapacity, usedCapacity));
        } catch (DatabaseException ex) {
            _log.error("Query to db failed for FileShare id {}, skipping recording usage stat.", stat.getResourceId(), ex);
        }
    }
    return stat;
}
Use of com.emc.storageos.db.client.model.Stat in the coprhd-controller project (CoprHD).
Class FEPortStatsProcessor, method createPortStatMetric:
/**
 * Builds a front-end port Stat from one row of metric values, appends it to
 * the result list, and feeds the kbytes/iops figures to the port metrics
 * processor (used for port allocation weighting).
 *
 * @param metricSequence ordered metric names describing the columns of {@code metrics}
 * @param port           the storage port this metric row belongs to
 * @param keyMap         shared context map (collection timestamp, etc.)
 * @param portStatsList  accumulator the new Stat is appended to
 * @param metrics        raw metric values, positionally aligned with {@code metricSequence}
 */
private void createPortStatMetric(List<String> metricSequence, StoragePort port, Map<String, Object> keyMap, List<Stat> portStatsList, String[] metrics) {
    Stat portStat = new Stat();
    // Identity/timestamp fields do not depend on the metric column, so set
    // them once up front instead of re-setting them on every loop iteration
    // (this also ensures they are populated when metricSequence is empty).
    portStat.setTimeCollected((Long) keyMap.get(Constants._TimeCollected));
    portStat.setTimeInMillis((Long) keyMap.get(Constants._TimeCollected));
    portStat.setNativeGuid(port.getNativeGuid());
    portStat.setResourceId(port.getId());
    portStat.setServiceType(Constants._Block);
    int count = 0;
    Long kbytes = 0L;
    Long iops = 0L;
    String statisticTime = "";
    for (String metricName : metricSequence) {
        switch (FEPortMetric.lookup(metricName)) {
            case InstanceID:
            case ElementType:
                // identity columns — nothing to record on the Stat
                break;
            case TotalIOs:
                iops = ControllerUtils.getLongValue(metrics[count]);
                portStat.setTotalIOs(iops);
                break;
            case KBytesTransferred:
                kbytes = ControllerUtils.getLongValue(metrics[count]);
                portStat.setKbytesTransferred(kbytes);
                break;
            case StatisticTime:
                statisticTime = metrics[count];
                break;
            default:
                _logger.warn("Ignoring unknown metric {} during system metric processing:", metricName);
                break;
        }
        count++;
    }
    portStatsList.add(portStat);
    // Process the port metrics.
    portMetricsProcessor.processFEPortMetrics(kbytes, iops, port, convertCIMStatisticTime(statisticTime));
}
Aggregations