Use of com.emc.storageos.db.client.model.StorageHADomain in project coprhd-controller by CoprHD.
Example from class VNXFileCommApi, method expandFS.
/**
 * Expand the given file system on the VNX array, first checking whether it is
 * currently mounted on its data mover.
 *
 * NOTE(review): the {@code isMountRequired} parameter is never read here; the
 * flag passed on to the overload is computed from the data mover's mount map
 * instead. Confirm whether the parameter is dead or the computation should
 * honor it.
 *
 * @param system the VNX storage system to operate on
 * @param fileShare the file system to expand
 * @param extendSize the size to extend by
 * @param isMountRequired unused in this overload (see note above)
 * @param isVirtualProvisioned whether the file system is thinly provisioned
 * @return the result of the expand operation from the XML API
 * @throws VNXException on array-side failure
 */
public XMLApiResult expandFS(final StorageSystem system, final FileShare fileShare, long extendSize, boolean isMountRequired, boolean isVirtualProvisioned) throws VNXException {
    // get the data mover
    boolean isMounted = false;
    StorageHADomain dataMover = this.getDataMover(fileShare);
    if (null != dataMover) {
        sshApi.setConnParams(system.getIpAddress(), system.getUsername(), system.getPassword());
        Map<String, String> existingMounts = sshApi.getFsMountpathMap(dataMover.getAdapterName());
        // NOTE(review): the flag is true when the FS is ABSENT from the data
        // mover's mount map, so despite its name it appears to mean "mount is
        // required" rather than "is mounted" — confirm against the
        // expandFS(StorageSystem, String, ...) overload's contract.
        isMounted = (existingMounts.get(fileShare.getName()) == null);
    }
    _log.info("expandFS for fileName{} and isMountRequired {}", fileShare.getName(), String.valueOf(isMounted));
    return expandFS(system, fileShare.getName(), extendSize, isMounted, isVirtualProvisioned);
}
Use of com.emc.storageos.db.client.model.StorageHADomain in project coprhd-controller by CoprHD.
Example from class VPlexPerpetualCSVFileCollector, method processCSVFileDataHeader.
/**
* Return a mapping of the metric name and the DataObject (StorageHADomain or StoragePort) to which the
* metric applies.
*
* First line from the CSV file will look like this:
*
* Time,Time (UTC),be-prt.write A1-FC02 (KB/s),be-prt.write A1-FC03 (KB/s),...
*
* 'headers' will contain each of the Strings delimited by ','. This function will parse that to determine
* what DataObject it should be associated with and its units.
*
* @param dbClient [IN] - DbClient used for DB access
* @param storageSystem [IN] - StorageSystem representing the VPlex array
* @param directorName [IN] - Name of VPlex director that this applies to
* @param headers [IN] - Metric names that show up in the file
* @return Map of String (header) to DataObject to which the metric applies
*/
/**
 * Return a mapping of the metric name and the DataObject (StorageHADomain or StoragePort) to which the
 * metric applies.
 *
 * First line from the CSV file will look like this:
 *
 * Time,Time (UTC),be-prt.write A1-FC02 (KB/s),be-prt.write A1-FC03 (KB/s),...
 *
 * 'headers' will contain each of the Strings delimited by ','. This function will parse that to determine
 * what DataObject it should be associated with and its units.
 *
 * @param dbClient [IN] - DbClient used for DB access
 * @param storageSystem [IN] - StorageSystem representing the VPlex array
 * @param directorName [IN] - Name of VPlex director that this applies to
 * @param headers [IN] - Metric names that show up in the file
 * @return Map of String (header) to DataObject to which the metric applies
 */
private Map<String, MetricHeaderInfo> processCSVFileDataHeader(DbClient dbClient, StorageSystem storageSystem, String directorName, List<String> headers) {
    Map<String, MetricHeaderInfo> metricToObjectMap = new HashMap<>();
    // The director lookup depends only on the method arguments, so resolve it at
    // most once instead of doing a DB lookup per matching header. Resolved lazily
    // so that no lookup happens at all when no header qualifies (matching the
    // original's behavior for that case).
    StorageHADomain vplexDirector = null;
    boolean directorResolved = false;
    for (String header : headers) {
        Matcher matcher = METRIC_NAME_PATTERN.matcher(header);
        if (matcher.matches()) {
            String name = matcher.group(1);
            // Limit the processing to only those metrics that we care about
            if (!METRICS_NAMES_TO_GATHER.contains(name)) {
                continue;
            }
            String objectName = matcher.group(2);
            String units = matcher.group(3);
            if (Strings.isNullOrEmpty(objectName)) {
                objectName = EMPTY;
            }
            if (!directorResolved) {
                vplexDirector = lookupVPlexDirectorByName(dbClient, storageSystem, directorName);
                directorResolved = true;
            }
            if (objectName.equals(EMPTY)) {
                // This applies to the director
                MetricHeaderInfo headerInfo = new MetricHeaderInfo();
                headerInfo.type = MetricHeaderInfo.Type.DIRECTOR;
                headerInfo.director = vplexDirector;
                headerInfo.units = units;
                metricToObjectMap.put(header, headerInfo);
            } else {
                // Let's assume that this is for a StoragePort with name 'objectName'
                StoragePort storagePort = lookupVPlexFrontStoragePortByName(dbClient, vplexDirector, objectName);
                if (storagePort != null) {
                    MetricHeaderInfo headerInfo = new MetricHeaderInfo();
                    headerInfo.type = MetricHeaderInfo.Type.PORT;
                    headerInfo.director = vplexDirector;
                    headerInfo.port = storagePort;
                    headerInfo.units = units;
                    metricToObjectMap.put(header, headerInfo);
                }
            }
        }
    }
    return metricToObjectMap;
}
Use of com.emc.storageos.db.client.model.StorageHADomain in project coprhd-controller by CoprHD.
Example from class VPlexPerpetualCSVFileCollector, method findStorageHADomainByNameInDB.
/**
* Retrieve from the DB the StorageHADomain named 'directorName' and associated with VPlex array 'storageSystem'
*
* @param dbClient [IN] - DbClient used for DB access
* @param storageSystem [IN] - StorageSystem representing the VPlex array
* @param directorName [IN] - Name of the VPlex director to find
* @return StorageHADomain with name 'directorName' and associated with VPlex array 'storageSystem', otherwise null
*/
/**
 * Retrieve from the DB the StorageHADomain named 'directorName' and associated with VPlex array 'storageSystem'
 *
 * @param dbClient [IN] - DbClient used for DB access
 * @param storageSystem [IN] - StorageSystem representing the VPlex array
 * @param directorName [IN] - Name of the VPlex director to find (must not be null)
 * @return StorageHADomain with name 'directorName' and associated with VPlex array 'storageSystem', otherwise null
 */
private StorageHADomain findStorageHADomainByNameInDB(DbClient dbClient, StorageSystem storageSystem, String directorName) {
    URIQueryResultList results = new URIQueryResultList();
    dbClient.queryByConstraint(ContainmentConstraint.Factory.getStorageDeviceStorageHADomainConstraint(storageSystem.getId()), results);
    Iterator<StorageHADomain> directorIterator = dbClient.queryIterativeObjects(StorageHADomain.class, results, true);
    while (directorIterator.hasNext()) {
        StorageHADomain director = directorIterator.next();
        // Compare with the searched-for name on the left so a director record
        // with a null adapterName in the DB cannot cause an NPE here.
        if (directorName.equals(director.getAdapterName())) {
            return director;
        }
    }
    log.warn("Could not find StorageHADomain with adapterName '{}' for StorageSystem {}", directorName, storageSystem.getNativeGuid());
    // Could not be found
    return null;
}
Use of com.emc.storageos.db.client.model.StorageHADomain in project coprhd-controller by CoprHD.
Example from class XtremIOMetricsCollector, method collectXEnvCPUUtilization.
/**
* Collect the CPU Utilization for all XEnv's in the cluster.
*
* @param system the system
* @param dbClient the db client
* @param xtremIOClient the xtremio client
* @param xioClusterName the xtremio cluster name
* @throws Exception
*/
/**
 * Collect the CPU Utilization for all XEnv's in the cluster.
 *
 * An XENV (XtremIO Environment) is composed of software defined modules responsible for the
 * internal data path on the array. There are two CPU sockets per Storage Controller (SC),
 * and one distinct XENV runs on each socket.
 *
 * Processing outline:
 * - Pick a time frame/granularity: the last hour normally, the last day when this system
 *   has not been metered recently (or ever).
 * - Query the XEnv performance counters and average the CPU usage samples per XEnv.
 * - Average each SC's (two) XEnv values into one per-SC figure.
 * - Feed each per-SC figure through PortMetricsProcessor.processFEAdaptMetrics()
 *   (persists cpuPercentBusy and emaPercentBusy) and store the exponentially weighted
 *   avgCpuPercentBusy back on the StorageHADomain.
 * - Roll the per-SC values up into the storage system's average.
 *
 * @param system the system
 * @param dbClient the db client
 * @param xtremIOClient the xtremio client
 * @param xtremIOClusterName the xtremio cluster name
 * @throws Exception
 */
private void collectXEnvCPUUtilization(StorageSystem system, DbClient dbClient, XtremIOClient xtremIOClient, String xtremIOClusterName) throws Exception {
    log.info("Collecting CPU usage for XtremIO system {}", system.getNativeGuid());
    // The time frame is chosen relative to the last metering run rather than as an
    // explicit from/to window, to avoid machine time-zone differences.
    // NOTE(review): assumes getLastMeteringRunTime() never returns null — a null here
    // would NPE on unboxing in the comparison below; confirm against StorageSystem.
    Long lastProcessedTime = system.getLastMeteringRunTime();
    Long currentTime = System.currentTimeMillis();
    Long oneDayTime = TimeUnit.DAYS.toMillis(1);
    String timeFrame = XtremIOConstants.LAST_HOUR;
    String granularity = XtremIOConstants.TEN_MINUTES;
    if (lastProcessedTime < 0 || ((currentTime - lastProcessedTime) >= oneDayTime)) {
        // Never processed before (or stale for a day or more): collect the last full day.
        timeFrame = XtremIOConstants.LAST_DAY;
        granularity = XtremIOConstants.ONE_HOUR;
    }
    XtremIOPerformanceResponse response = xtremIOClient.getXtremIOObjectPerformance(xtremIOClusterName, XtremIOConstants.XTREMIO_ENTITY_TYPE.XEnv.name(), XtremIOConstants.TIME_FRAME, timeFrame, XtremIOConstants.GRANULARITY, granularity);
    log.info("Response - Members: {}", Arrays.toString(response.getMembers()));
    log.info("Response - Counters: {}", Arrays.deepToString(response.getCounters()));
    // Segregate the response rows by XEnv name, keeping every CPU sample per XEnv.
    ArrayListMultimap<String, Double> xEnvToCPUvalues = ArrayListMultimap.create();
    int xEnvIndex = getIndexForAttribute(response.getMembers(), XtremIOConstants.NAME);
    int cpuIndex = getIndexForAttribute(response.getMembers(), XtremIOConstants.AVG_CPU_USAGE);
    String[][] counters = response.getCounters();
    for (String[] counter : counters) {
        log.debug(Arrays.toString(counter));
        String xEnv = counter[xEnvIndex];
        String cpuUtilization = counter[cpuIndex];
        if (cpuUtilization != null) {
            xEnvToCPUvalues.put(xEnv, Double.valueOf(cpuUtilization));
        }
    }
    // Calculate the average usage for each XEnv for the queried period of time.
    Map<String, Double> xEnvToAvgCPU = new HashMap<>();
    for (String xEnv : xEnvToCPUvalues.keySet()) {
        List<Double> cpuUsageList = xEnvToCPUvalues.get(xEnv);
        Double avgCPU = cpuUsageList.stream().mapToDouble(Double::doubleValue).sum() / cpuUsageList.size();
        log.info("XEnv: {}, collected CPU usage: {}, average: {}", xEnv, cpuUsageList.toString(), avgCPU);
        xEnvToAvgCPU.put(xEnv, avgCPU);
    }
    // Calculate the average usage for each Storage Controller (from its 2 XEnvs):
    // the first XEnv seen seeds the value, the second averages into it.
    Map<URI, Double> scToAvgCPU = new HashMap<>();
    for (Map.Entry<String, Double> xEnvEntry : xEnvToAvgCPU.entrySet()) {
        StorageHADomain sc = getStorageControllerForXEnv(xEnvEntry.getKey(), system, dbClient);
        if (sc == null) {
            log.debug("StorageHADomain not found for XEnv {}", xEnvEntry.getKey());
            continue;
        }
        scToAvgCPU.merge(sc.getId(), xEnvEntry.getValue(), (existing, incoming) -> (existing + incoming) / 2.0);
    }
    // Calculate the exponential average for each Storage Controller.
    double emaFactor = PortMetricsProcessor.getEmaFactor(DiscoveredDataObject.Type.valueOf(system.getSystemType()));
    if (emaFactor > 1.0) {
        // in case of invalid user input
        emaFactor = 1.0;
    }
    for (Map.Entry<URI, Double> scEntry : scToAvgCPU.entrySet()) {
        Double avgScCPU = scEntry.getValue();
        StorageHADomain sc = dbClient.queryObject(StorageHADomain.class, scEntry.getKey());
        log.info("StorageHADomain: {}, average CPU Usage: {}", sc.getAdapterName(), avgScCPU);
        // Persists cpuPercentBusy and emaPercentBusy on the StorageHADomain metrics.
        portMetricsProcessor.processFEAdaptMetrics(avgScCPU, 0L, sc, currentTime.toString(), false);
        StringMap dbMetrics = sc.getMetrics();
        Double scAvgBusy = MetricsKeys.getDouble(MetricsKeys.avgPercentBusy, dbMetrics);
        Double scEmaBusy = MetricsKeys.getDouble(MetricsKeys.emaPercentBusy, dbMetrics);
        Double scPercentBusy = (scAvgBusy * emaFactor) + ((1 - emaFactor) * scEmaBusy);
        MetricsKeys.putDouble(MetricsKeys.avgCpuPercentBusy, scPercentBusy, dbMetrics);
        MetricsKeys.putLong(MetricsKeys.lastProcessingTime, currentTime, dbMetrics);
        sc.setMetrics(dbMetrics);
        dbClient.updateObject(sc);
    }
    // Calculate the storage system's average CPU usage by combining all SCs.
    portMetricsProcessor.computeStorageSystemAvgPortMetrics(system.getId());
}
Use of com.emc.storageos.db.client.model.StorageHADomain in project coprhd-controller by CoprHD.
Example from class XtremIOMetricsCollector, method getStorageControllerForXEnv.
/**
* Gets the storage controller (StorageHADomain) for the given XEnv name.
*/
/**
 * Gets the storage controller (StorageHADomain) for the given XEnv name.
 *
 * The controller name is derived by stripping everything from the last hyphen of
 * the XEnv name onward, then looking up the StorageHADomain by its native GUID.
 *
 * @param xEnv the XEnv name, expected to be of the form "&lt;SC-name&gt;-&lt;suffix&gt;"
 * @param system the XtremIO storage system the XEnv belongs to
 * @param dbClient DbClient used for DB access
 * @return the matching StorageHADomain, or null if the XEnv name has no hyphen
 *         separator or no matching record exists in the DB
 */
private StorageHADomain getStorageControllerForXEnv(String xEnv, StorageSystem system, DbClient dbClient) {
    StorageHADomain haDomain = null;
    int lastHyphen = xEnv.lastIndexOf(Constants.HYPHEN);
    if (lastHyphen < 0) {
        // Guard: substring(0, -1) would throw StringIndexOutOfBoundsException.
        // Callers already tolerate a null result, so report and bail out.
        log.debug("XEnv name '{}' has no hyphen separator; cannot derive controller name", xEnv);
        return null;
    }
    String haDomainNativeGUID = NativeGUIDGenerator.generateNativeGuid(system, xEnv.substring(0, lastHyphen), NativeGUIDGenerator.ADAPTER);
    URIQueryResultList haDomainQueryResult = new URIQueryResultList();
    dbClient.queryByConstraint(AlternateIdConstraint.Factory.getStorageHADomainByNativeGuidConstraint(haDomainNativeGUID), haDomainQueryResult);
    Iterator<URI> itr = haDomainQueryResult.iterator();
    if (itr.hasNext()) {
        haDomain = dbClient.queryObject(StorageHADomain.class, itr.next());
    }
    return haDomain;
}
Aggregations