use of com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileDBInsertion in project coprhd-controller by CoprHD.
the class DataDomainCommunicationInterface method collectStatisticsInformation.
@Override
public void collectStatisticsInformation(AccessProfile accessProfile) throws BaseCollectionException, DataDomainApiException {
long statsCount = 0;
URI storageSystemId = null;
StorageSystem storageSystem = null;
try {
_log.info("Stats collection for {} using ip {}", accessProfile.getSystemId(), accessProfile.getIpAddress());
storageSystemId = accessProfile.getSystemId();
storageSystem = _dbClient.queryObject(StorageSystem.class, storageSystemId);
initializeKeyMap(accessProfile);
DataDomainClient ddClient = getDataDomainClient(accessProfile);
URI providerId = storageSystem.getActiveProviderURI();
StorageProvider provider = _dbClient.queryObject(StorageProvider.class, providerId);
ZeroRecordGenerator zeroRecordGenerator = new FileZeroRecordGenerator();
CassandraInsertion statsColumnInjector = new FileDBInsertion();
DataDomainStatsRecorder recorder = new DataDomainStatsRecorder(zeroRecordGenerator, statsColumnInjector);
// Stats collection start time
long statsCollectionStartTime = storageSystem.getLastMeteringRunTime();
// If there is no previous metering run, fall back to the time the storage system was successfully discovered.
if (statsCollectionStartTime == 0) {
statsCollectionStartTime = storageSystem.getSuccessDiscoveryTime();
}
// Stats collection end time
long statsCollectionEndTime = accessProfile.getCurrentSampleTime();
_keyMap.put(Constants._TimeCollected, statsCollectionEndTime);
// Get list of file systems on the device that are in the DB
List<URI> fsUris = zeroRecordGenerator.extractVolumesOrFileSharesFromDB(storageSystemId, _dbClient, FileShare.class);
List<FileShare> fsObjs = _dbClient.queryObject(FileShare.class, fsUris, true);
// Get capacity usage info on individual mtrees
List<Stat> stats = new ArrayList<>();
for (FileShare fileSystem : fsObjs) {
String fsNativeId = fileSystem.getNativeId();
String fsNativeGuid = fileSystem.getNativeGuid();
// Retrieve the last 2 data points only
int entriesRetrieved = 0;
List<DDStatsCapacityInfo> statsCapInfos = new ArrayList<>();
// Default
DDStatsIntervalQuery granularity = DDStatsIntervalQuery.hour;
// Retrieve hourly data - lowest resolution supported by DD arrays.
try {
DDMtreeCapacityInfos mtreeCapInfo = ddClient.getMTreeCapacityInfo(storageSystem.getNativeGuid(), fsNativeId, DataDomainApiConstants.STATS_FIRST_PAGE, DataDomainApiConstants.STATS_PAGE_SIZE, DDStatsDataViewQuery.absolute, DDStatsIntervalQuery.hour, true, DataDomainApiConstants.DESCENDING_SORT);
entriesRetrieved += mtreeCapInfo.getPagingInfo().getPageEntries();
// Collect stats
List<DDStatsCapacityInfo> capacityInfos = mtreeCapInfo.getStatsCapacityInfo();
if (capacityInfos != null) {
statsCapInfos.addAll(capacityInfos);
}
statsCount += entriesRetrieved;
} catch (Exception e) {
_log.info("Stats collection info not found for fileNativeGuid ", fsNativeGuid);
continue;
}
// Retrieved all pages, now save in DB if info changed in the latest data point
long usedCapacity = 0;
if (fileSystem.getUsedCapacity() != null) {
usedCapacity = fileSystem.getUsedCapacity();
}
DDStatsCapacityInfo statsCapInfo = null;
Stat stat = null;
if (statsCapInfos != null && !statsCapInfos.isEmpty()) {
statsCapInfo = statsCapInfos.get(0);
_keyMap.put(Constants._Granularity, granularity);
stat = recorder.addUsageInfo(statsCapInfo, _keyMap, fsNativeGuid, ddClient);
}
// Persist FileShare capacity stats only if usage info has changed
long allocatedCapacity = 0;
if (stat != null) {
allocatedCapacity = stat.getAllocatedCapacity();
}
// TODO: a method to detect changes in stats will be useful
boolean statsChanged = (usedCapacity != allocatedCapacity);
if ((stat != null) && (!fileSystem.getInactive()) && (statsChanged)) {
stats.add(stat);
fileSystem.setUsedCapacity(allocatedCapacity);
fileSystem.setCapacity(stat.getProvisionedCapacity());
_dbClient.persistObject(fileSystem);
}
}
// Determine whether any file systems were deleted from this device and write zero records for the deleted ones
zeroRecordGenerator.identifyRecordstobeZeroed(_keyMap, stats, FileShare.class);
persistStatsInDB(stats);
// TODO: Metering task completer will overwrite currTime below with a new
// time as the last collection time. To avoid this, setLastTime in
// MeteringTaskCompleter should be modified to set last metering run time
// only if it has not already been set.
storageSystem.setLastMeteringRunTime(statsCollectionEndTime);
_log.info("Done metering device {}, processed {} file system stats ", storageSystemId, statsCount);
_log.info("End collecting statistics for ip address {}", accessProfile.getIpAddress());
} catch (Exception e) {
_log.error("CollectStatisticsInformation failed. Storage system: " + storageSystemId, e);
throw DataDomainApiException.exceptions.statsCollectionFailed(e.getMessage());
}
}
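All three collectStatisticsInformation implementations shown here wire the same two collaborators into a device-specific recorder: a FileZeroRecordGenerator that zeroes out records for file shares that have disappeared from the array, and a FileDBInsertion (a CassandraInsertion) that controls how each Stat is written to the database. The following is a minimal, self-contained sketch of that composition; the nested interfaces and classes below are simplified stand-ins for illustration, not the actual CoprHD signatures.

import java.util.ArrayList;
import java.util.List;

public class StatsWiringSketch {

    // Stand-in for a collected capacity sample.
    static class Stat {
        final String nativeGuid;
        final long allocatedCapacity;
        Stat(String nativeGuid, long allocatedCapacity) {
            this.nativeGuid = nativeGuid;
            this.allocatedCapacity = allocatedCapacity;
        }
    }

    // Stand-in for CassandraInsertion / FileDBInsertion: how a Stat row is persisted.
    interface ColumnInjector {
        void inject(Stat stat);
    }

    // Stand-in for ZeroRecordGenerator / FileZeroRecordGenerator: emits zero records
    // for file shares that no longer exist on the device.
    interface ZeroRecordGenerator {
        void identifyRecordsToBeZeroed(List<Stat> collected);
    }

    // Stand-in for a device-specific stats recorder built from the two collaborators,
    // mirroring new DataDomainStatsRecorder(zeroRecordGenerator, statsColumnInjector).
    static class StatsRecorder {
        private final ZeroRecordGenerator zeroRecordGenerator;
        private final ColumnInjector injector;

        StatsRecorder(ZeroRecordGenerator zeroRecordGenerator, ColumnInjector injector) {
            this.zeroRecordGenerator = zeroRecordGenerator;
            this.injector = injector;
        }

        Stat addUsageInfo(String nativeGuid, long allocatedCapacity) {
            Stat stat = new Stat(nativeGuid, allocatedCapacity);
            injector.inject(stat); // device-independent persistence hook
            return stat;
        }

        void finish(List<Stat> collected) {
            // Zero out records for file systems that were deleted from the device.
            zeroRecordGenerator.identifyRecordsToBeZeroed(collected);
        }
    }

    public static void main(String[] args) {
        StatsRecorder recorder = new StatsRecorder(
                collected -> System.out.println("zero-record check over " + collected.size() + " stats"),
                stat -> System.out.println("inserting stat for " + stat.nativeGuid));

        List<Stat> stats = new ArrayList<>();
        stats.add(recorder.addUsageInfo("fs-guid-1", 1024L));
        recorder.finish(stats);
    }
}

The device-specific recorders (DataDomainStatsRecorder, IsilonStatsRecorder, NetAppStatsRecorder) differ only in how they translate array-specific capacity payloads into Stat objects; the zero-record and insertion behavior is shared through the two injected collaborators.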
use of com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileDBInsertion in project coprhd-controller by CoprHD.
the class IsilonCommunicationInterface method collectStatisticsInformation.
@Override
public void collectStatisticsInformation(AccessProfile accessProfile) throws BaseCollectionException {
URI storageSystemId = null;
StorageSystem isilonCluster = null;
long statsCount = 0;
try {
_log.info("Metering for {} using ip {}", accessProfile.getSystemId(), accessProfile.getIpAddress());
IsilonApi api = getIsilonDevice(accessProfile);
long latestSampleTime = accessProfile.getLastSampleTime();
storageSystemId = accessProfile.getSystemId();
isilonCluster = _dbClient.queryObject(StorageSystem.class, storageSystemId);
String serialNumber = isilonCluster.getSerialNumber();
String deviceType = isilonCluster.getSystemType();
initializeKeyMap(accessProfile);
boolean fsChanged = false;
List<Stat> stats = new ArrayList<Stat>();
List<FileShare> modifiedFileSystems = new ArrayList<FileShare>();
ZeroRecordGenerator zeroRecordGenerator = new FileZeroRecordGenerator();
CassandraInsertion statsColumnInjector = new FileDBInsertion();
// get usage stats from quotas
IsilonStatsRecorder recorder = new IsilonStatsRecorder(zeroRecordGenerator, statsColumnInjector);
_keyMap.put(Constants._TimeCollected, System.currentTimeMillis());
// Compute static load metrics for this storage system
computeStaticLoadMetrics(storageSystemId);
Map<String, String> fileSystemsMap = getStorageSystemFileShares(storageSystemId);
if (fileSystemsMap.isEmpty()) {
// No file shares for the storage system,
// ignore stats collection for the system!!!
_log.info("No file systems found for storage device {}. Hence metering stats collection ignored.", storageSystemId);
return;
}
// Process Isilon quotas page by page (max 1000 quotas per page)
String resumeToken = null;
do {
IsilonApi.IsilonList<IsilonSmartQuota> quotas = api.listQuotas(resumeToken);
resumeToken = quotas.getToken();
for (IsilonSmartQuota quota : quotas.getList()) {
String fsNativeId = quota.getPath();
String fsNativeGuid = NativeGUIDGenerator.generateNativeGuid(deviceType, serialNumber, fsNativeId);
String fsId = fileSystemsMap.get(fsNativeGuid);
if (fsId == null || fsId.isEmpty()) {
// No file shares found for the quota
// ignore stats collection for the file system!!!
_log.debug("File System does not exists with nativeid {}. Hence ignoring stats collection.", fsNativeGuid);
continue;
}
Stat stat = recorder.addUsageStat(quota, _keyMap, fsId, api);
fsChanged = false;
if (null != stat) {
stats.add(stat);
// Persist the file system only if its usage info has changed.
FileShare fileSystem = _dbClient.queryObject(FileShare.class, stat.getResourceId());
if (fileSystem != null) {
if (!fileSystem.getInactive()) {
if (null != fileSystem.getUsedCapacity() && null != stat.getAllocatedCapacity() && !fileSystem.getUsedCapacity().equals(stat.getAllocatedCapacity())) {
fileSystem.setUsedCapacity(stat.getAllocatedCapacity());
fsChanged = true;
}
if (null != fileSystem.getSoftLimit() && null != fileSystem.getSoftLimitExceeded() && null != quota.getThresholds() && null != quota.getThresholds().getsoftExceeded() && !fileSystem.getSoftLimitExceeded().equals(quota.getThresholds().getsoftExceeded())) {
// softLimitExceeded
fileSystem.setSoftLimitExceeded(quota.getThresholds().getsoftExceeded());
fsChanged = true;
}
if (fsChanged) {
modifiedFileSystems.add(fileSystem);
}
}
}
}
// Flush in batches of MAX_RECORDS_SIZE (100) records.
if (modifiedFileSystems.size() >= MAX_RECORDS_SIZE) {
_dbClient.updateObject(modifiedFileSystems);
_log.info("Processed {} file systems stats ", modifiedFileSystems.size());
modifiedFileSystems.clear();
}
if (stats.size() >= MAX_RECORDS_SIZE) {
_log.info("Processed {} stats", stats.size());
persistStatsInDB(stats);
}
}
statsCount = statsCount + quotas.size();
_log.info("Processed {} file system stats for device {} ", quotas.size(), storageSystemId);
} while (resumeToken != null);
zeroRecordGenerator.identifyRecordstobeZeroed(_keyMap, stats, FileShare.class);
// write the remaining records!!
if (!modifiedFileSystems.isEmpty()) {
_dbClient.updateObject(modifiedFileSystems);
_log.info("Processed {} file systems stats ", modifiedFileSystems.size());
modifiedFileSystems.clear();
}
if (!stats.isEmpty()) {
_log.info("Processed {} stats", stats.size());
persistStatsInDB(stats);
}
latestSampleTime = System.currentTimeMillis();
accessProfile.setLastSampleTime(latestSampleTime);
_log.info("Done metering device {}, processed {} file system stats ", storageSystemId, statsCount);
} catch (Exception e) {
if (isilonCluster != null) {
cleanupDiscovery(isilonCluster);
}
_log.error("CollectStatisticsInformation failed. Storage system: " + storageSystemId, e);
throw (new IsilonCollectionException(e.getMessage()));
}
}
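The Isilon path combines two recurring mechanics: paging through quotas with a resume token and flushing accumulated records to the database in fixed-size batches. Below is a generic sketch of that loop, using hypothetical Page/PagedSource types and a caller-supplied flush action standing in for _dbClient.updateObject / persistStatsInDB; it is not the CoprHD API, just the shape of the control flow.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

public class PagedBatchSketch {

    // One page of results plus the token needed to fetch the next page (null when done).
    static class Page<T> {
        final List<T> items;
        final String resumeToken;
        Page(List<T> items, String resumeToken) {
            this.items = items;
            this.resumeToken = resumeToken;
        }
    }

    // Stand-in for a paged device API such as api.listQuotas(resumeToken).
    interface PagedSource<T> {
        Page<T> list(String resumeToken);
    }

    // Drains a paged source, flushing accumulated records whenever the batch limit is hit.
    static <T> long drain(PagedSource<T> source, int batchSize, Consumer<List<T>> flush) {
        long total = 0;
        List<T> batch = new ArrayList<>();
        String resumeToken = null;
        do {
            Page<T> page = source.list(resumeToken);
            resumeToken = page.resumeToken;
            for (T item : page.items) {
                batch.add(item);
                if (batch.size() >= batchSize) {
                    flush.accept(batch); // e.g. write the batch to the database
                    batch.clear();
                }
            }
            total += page.items.size();
        } while (resumeToken != null);
        if (!batch.isEmpty()) {
            flush.accept(batch); // write the remaining records
        }
        return total;
    }

    public static void main(String[] args) {
        // Two fake pages of three quota paths each, just to exercise the loop.
        PagedSource<String> fake = token -> token == null
                ? new Page<>(List.of("/ifs/a", "/ifs/b", "/ifs/c"), "page2")
                : new Page<>(List.of("/ifs/d", "/ifs/e", "/ifs/f"), null);
        long processed = drain(fake, 4, b -> System.out.println("flushing " + b.size() + " records"));
        System.out.println("processed " + processed + " quotas");
    }
}

In the actual method, modified FileShare objects and Stat records are batched independently against the same MAX_RECORDS_SIZE threshold, with a final flush for each list after the resume-token loop exits.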
use of com.emc.storageos.volumecontroller.impl.plugins.metering.file.FileDBInsertion in project coprhd-controller by CoprHD.
the class NetAppFileCommunicationInterface method collectStatisticsInformation.
@Override
public void collectStatisticsInformation(AccessProfile accessProfile) throws BaseCollectionException {
URI storageSystemId = null;
String fsName = null;
try {
_logger.info("Metering for {} using ip {}", accessProfile.getSystemId(), accessProfile.getIpAddress());
String arrayIp = accessProfile.getIpAddress();
String arrayUser = accessProfile.getUserName();
String arrayPassword = accessProfile.getPassword();
int arrayPort = accessProfile.getPortNumber();
NetAppApi netAppApi = new NetAppApi.Builder(arrayIp, arrayPort, arrayUser, arrayPassword).https(true).build();
long latestSampleTime = accessProfile.getLastSampleTime();
storageSystemId = accessProfile.getSystemId();
StorageSystem netAppArray = _dbClient.queryObject(StorageSystem.class, storageSystemId);
String serialNumber = netAppArray.getSerialNumber();
String deviceType = netAppArray.getSystemType();
initializeKeyMap(accessProfile);
List<Stat> stats = new ArrayList<Stat>();
ZeroRecordGenerator zeroRecordGenerator = new FileZeroRecordGenerator();
CassandraInsertion statsColumnInjector = new FileDBInsertion();
/* Get Stats from the NTAP array */
List<Map<String, String>> usageStats = new ArrayList<Map<String, String>>();
NetAppStatsRecorder recorder = new NetAppStatsRecorder(zeroRecordGenerator, statsColumnInjector);
_keyMap.put(Constants._TimeCollected, System.currentTimeMillis());
Map<String, Number> metrics = new ConcurrentHashMap<String, Number>();
List<URI> storageSystemIds = new ArrayList<URI>();
storageSystemIds.add(storageSystemId);
List<FileShare> fsObjs = _dbClient.queryObjectField(FileShare.class, Constants.STORAGE_DEVICE, storageSystemIds);
List<URI> fsUris = zeroRecordGenerator.extractVolumesOrFileSharesFromDB(storageSystemId, _dbClient, FileShare.class);
for (URI fsUri : fsUris) {
FileShare fsObj = _dbClient.queryObject(FileShare.class, fsUri);
if (fsObj.getInactive()) {
continue;
}
fsName = fsObj.getName();
String fsNativeGuid = NativeGUIDGenerator.generateNativeGuid(deviceType, serialNumber, fsObj.getPath());
try {
usageStats = netAppApi.listVolumeInfo(fsName, null);
for (Map<String, String> map : usageStats) {
/*
* TODO: usageStats usually contains a single element. If
* the list consists of multiple elements, all but one
* element will get overwritten.
*/
metrics.put(Constants.SIZE_TOTAL, 0);
if (map.get(Constants.SIZE_TOTAL) != null) {
metrics.put(Constants.SIZE_TOTAL, Long.valueOf(map.get(Constants.SIZE_TOTAL)));
}
metrics.put(Constants.SIZE_USED, 0);
if (map.get(Constants.SIZE_USED) != null) {
metrics.put(Constants.SIZE_USED, Long.valueOf(map.get(Constants.SIZE_USED)));
}
/*
* TODO: Bytes per block on NTAP is hard coded for now. If
* possible, we should get this from the array.
*/
Long snapshotBytesReserved = 0L;
if (map.get(Constants.SNAPSHOT_BLOCKS_RESERVED) != null) {
snapshotBytesReserved = Long.valueOf(map.get(Constants.SNAPSHOT_BLOCKS_RESERVED)) * Constants.NETAPP_BYTES_PER_BLOCK;
}
metrics.put(Constants.SNAPSHOT_BYTES_RESERVED, snapshotBytesReserved);
Integer snapshotCount = _dbClient.countObjects(Snapshot.class, Constants.PARENT, fsObj.getId());
metrics.put(Constants.SNAPSHOT_COUNT, snapshotCount);
Stat stat = recorder.addUsageStat(fsNativeGuid, _keyMap, metrics);
if (stat != null) {
stats.add(stat);
// Persist the file system only if the used capacity has changed.
if (fsObj.getUsedCapacity() == null || !fsObj.getUsedCapacity().equals(stat.getAllocatedCapacity())) {
fsObj.setUsedCapacity(stat.getAllocatedCapacity());
_dbClient.persistObject(fsObj);
}
}
}
} catch (NetAppException ne) {
String arg = fsName + ", " + accessProfile.getIpAddress() + ", " + accessProfile.getSystemType();
_logger.info("Failed to retrieve stats for FileShare, System, Type: {}", arg);
}
}
if (!stats.isEmpty()) {
zeroRecordGenerator.identifyRecordstobeZeroed(_keyMap, stats, FileShare.class);
persistStatsInDB(stats);
latestSampleTime = System.currentTimeMillis();
accessProfile.setLastSampleTime(latestSampleTime);
}
_logger.info("Done metering device {}", storageSystemId);
} catch (Exception e) {
String message = "collectStatisticsInformation failed. Storage system: " + storageSystemId;
_logger.error(message, e);
throw NetAppException.exceptions.collectStatsFailed(accessProfile.getIpAddress(), accessProfile.getSystemType(), message);
}
}
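The NetApp branch spends most of its body converting the string-valued volume-info map returned by listVolumeInfo into numeric metrics, defaulting missing keys to zero and converting the snapshot reserve from blocks to bytes. A small null-safe sketch of that conversion follows; the key names and the 4 KB block size are illustrative assumptions here, not the actual values of the project's Constants class.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NetAppMetricsSketch {

    // Hypothetical stand-ins for the constants referenced in the method above;
    // the real keys and block size come from the coprhd-controller sources.
    static final String SIZE_TOTAL = "size-total";
    static final String SIZE_USED = "size-used";
    static final String SNAPSHOT_BLOCKS_RESERVED = "snapshot-blocks-reserved";
    static final String SNAPSHOT_BYTES_RESERVED = "snapshotBytesReserved";
    static final long NETAPP_BYTES_PER_BLOCK = 4096L; // assumed 4 KB blocks for illustration

    // Null-safe conversion of one volume-info map (string values) into numeric metrics.
    static Map<String, Number> toMetrics(Map<String, String> volumeInfo) {
        Map<String, Number> metrics = new ConcurrentHashMap<>();
        metrics.put(SIZE_TOTAL, parseOrZero(volumeInfo.get(SIZE_TOTAL)));
        metrics.put(SIZE_USED, parseOrZero(volumeInfo.get(SIZE_USED)));
        // Snapshot reserve is reported in blocks; convert it to bytes.
        long snapshotBytesReserved = parseOrZero(volumeInfo.get(SNAPSHOT_BLOCKS_RESERVED)) * NETAPP_BYTES_PER_BLOCK;
        metrics.put(SNAPSHOT_BYTES_RESERVED, snapshotBytesReserved);
        return metrics;
    }

    static long parseOrZero(String value) {
        return value == null ? 0L : Long.parseLong(value);
    }

    public static void main(String[] args) {
        Map<String, String> volumeInfo = new HashMap<>();
        volumeInfo.put(SIZE_TOTAL, "1073741824");
        volumeInfo.put(SNAPSHOT_BLOCKS_RESERVED, "256");
        System.out.println(toMetrics(volumeInfo));
    }
}

In the real method, the resulting metrics map is handed to NetAppStatsRecorder.addUsageStat(fsNativeGuid, _keyMap, metrics), and the FileShare is persisted only when the allocated capacity differs from the stored value.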