Use of com.emc.storageos.db.client.model.Stat in the project coprhd-controller by CoprHD.
From the class VNXFileSystemUsageProcessor, method processResult:
/**
 * Processes the VNX file-system usage (capacity) query response.
 * <p>
 * Unmarshals the XML API response from the {@link PostMethod}. If the packet
 * reports a fault, each {@link Problem} is logged and no stats are collected.
 * Otherwise the usage records are extracted and handed to
 * {@code processFileShareInfo}, and file shares with no sample in this cycle
 * are zeroed out via {@code _zeroRecordGenerator}.
 *
 * @param operation the collection operation being processed (unused here)
 * @param resultObj the HTTP {@link PostMethod} carrying the response stream
 * @param keyMap    collection context; supplies the {@link DbClient} and the
 *                  shared {@code Stat} list under {@code Constants._Stats}
 * @throws BaseCollectionException if the response cannot be processed
 */
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    _logger.info("processing fileshare usage response {}", resultObj);
    final PostMethod result = (PostMethod) resultObj;
    try {
        DbClient dbClient = (DbClient) keyMap.get(Constants.dbClient);
        ResponsePacket responsePacket = (ResponsePacket) _unmarshaller.unmarshal(result.getResponseBodyAsStream());
        if (null != responsePacket.getPacketFault()) {
            // Fault path: log every reported problem; nothing is collected.
            Status status = responsePacket.getPacketFault();
            for (Problem problem : status.getProblem()) {
                _logger.error("Fault response received due to {} possible cause {}", problem.getDescription(), problem.getDiagnostics());
            }
        } else {
            List<Object> fsUsageInfo = getQueryStatsResponse(responsePacket);
            final List<Stat> statList = (List<Stat>) keyMap.get(Constants._Stats);
            processFileShareInfo(fsUsageInfo, keyMap, statList, dbClient);
            // Zero out stats for file shares that produced no usage record this cycle.
            _zeroRecordGenerator.identifyRecordstobeZeroed(keyMap, statList, FileShare.class);
        }
    } catch (final IOException ioEx) {
        _logger.error("IOException occurred while processing the Fileshare capacity response", ioEx);
        // Chain the caught exception itself: the previous ioEx.getCause() dropped
        // the real failure (and is often null), destroying the stack trace.
        throw new VNXFilePluginException("IOException occurred while processing the Fileshare capacity response.", ioEx);
    } catch (final Exception ex) {
        _logger.error("Exception occurred while processing the Fileshare capacity response", ex);
        throw new VNXFilePluginException("Exception occurred while processing the Fileshare capacity response.", ex);
    } finally {
        // Always release the HTTP connection, success or failure.
        result.releaseConnection();
    }
}
Use of com.emc.storageos.db.client.model.Stat in the project coprhd-controller by CoprHD.
From the class VNXSnapshotProcessor, method processResult:
/**
 * Processes the VNX snapshot (checkpoint) query response.
 * <p>
 * On a packet fault the error status is delegated to {@code processErrorStatus}.
 * Otherwise the snapshot records are read, per-file-system checkpoint capacity
 * totals are computed, and — when the leading {@link Status} record reports
 * OK severity — each previously collected {@link Stat} is enriched with its
 * snapshot details. Any failure is logged and swallowed so one bad response
 * does not abort the collection cycle.
 *
 * @param operation the collection operation being processed (unused here)
 * @param resultObj the HTTP {@link PostMethod} carrying the response stream
 * @param keyMap    collection context; supplies the shared {@code Stat} list
 *                  under {@code VNXFileConstants.STATS}
 * @throws BaseCollectionException declared by the interface; not thrown here
 */
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    _logger.info("processing snapshot response" + resultObj);
    final PostMethod result = (PostMethod) resultObj;
    try {
        ResponsePacket responsePacket = (ResponsePacket) _unmarshaller.unmarshal(result.getResponseBodyAsStream());
        if (null != responsePacket.getPacketFault()) {
            processErrorStatus(responsePacket.getPacketFault(), keyMap);
        } else {
            List<Object> snapshotList = getQueryResponse(responsePacket);
            // file system check point info
            getSnapTotalCapacityOfFileSystems(snapshotList, keyMap);
            if (!snapshotList.isEmpty()) {
                // The first element of the response is the overall operation status.
                Status opStatus = (Status) snapshotList.get(0);
                if (opStatus.getMaxSeverity() == Severity.OK) {
                    final List<Stat> collectedStats = (List<Stat>) keyMap.get(VNXFileConstants.STATS);
                    for (Stat stat : collectedStats) {
                        fetchSnapShotDetails(stat, snapshotList);
                    }
                } else {
                    processErrorStatus(opStatus, keyMap);
                }
            }
        }
    } catch (final Exception ex) {
        _logger.error("Exception occurred while processing the snapshot response due to {}", ex.getMessage());
    } finally {
        // Always release the HTTP connection, success or failure.
        result.releaseConnection();
    }
}
Use of com.emc.storageos.db.client.model.Stat in the project coprhd-controller by CoprHD.
From the class VNXStoragePortStatsProcessor, method processResult:
/**
 * Processes the data-mover network statistics response and derives
 * per-storage-port metrics for a VNX file storage system.
 * <p>
 * For each data mover in the response: looks up its interface-to-port map,
 * aggregates input+output traffic per physical port from the samples,
 * computes and persists port metrics, and appends the resulting {@code Stat}
 * records to the shared list. Finally the average port utilization per data
 * mover is computed and stored. Failures are logged and swallowed so one bad
 * response does not abort the collection cycle.
 *
 * @param operation the collection operation being processed (unused here)
 * @param resultObj the HTTP {@link PostMethod} carrying the response stream
 * @param keyMap    collection context; supplies the access profile, DbClient,
 *                  shared stats list and the mover interface/port map
 * @throws BaseCollectionException declared by the interface; not thrown here
 */
@SuppressWarnings("unchecked") // keyMap holds heterogeneous values; casts match the sibling processors
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    final PostMethod result = (PostMethod) resultObj;
    _logger.info("processing moversStats response {}", resultObj);
    try {
        List<Stat> newstatsList = null;
        Map<String, List<String>> interPortMap = null;
        AccessProfile profile = (AccessProfile) keyMap.get(Constants.ACCESSPROFILE);
        List<Stat> statsList = (List<Stat>) keyMap.get(VNXFileConstants.STATS);
        final DbClient dbClient = (DbClient) keyMap.get(VNXFileConstants.DBCLIENT);
        /*
         * step --> 1 get the interface map for mover; the map contains storage ports as values:
         * <MoverId, Map<interfaceIP, List<physicalPortName>>>
         */
        Map<String, Map<String, List<String>>> moverInterMap =
                (Map<String, Map<String, List<String>>>) keyMap.get(VNXFileConstants.INTREFACE_PORT_MAP);
        ResponsePacket responsePacket = (ResponsePacket) _unmarshaller.unmarshal(result.getResponseBodyAsStream());
        List<Object> moversStats = getQueryStatsResponse(responsePacket);
        // get the storagesystem from db
        StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, profile.getSystemId());
        // process Mover stats: each entry holds samples for one data mover
        for (Object moverStatsObj : moversStats) {
            MoverNetStats moverNetStats = (MoverNetStats) moverStatsObj;
            String moverId = moverNetStats.getMover();
            // interfaces and their port lists for this mover id
            interPortMap = moverInterMap.get(moverId);
            // sample data of the mover or VDM
            List<MoverNetStats.Sample> sampleList = moverNetStats.getSample();
            Map<String, BigInteger> stringMapPortIOs = new HashMap<>();
            /*
             * step --> 2 aggregate io-ops of physical ports from the samples:
             * <physicalPortName, BigInteger(input + output bandwidth)>
             */
            getPortIOTraffic(sampleList, stringMapPortIOs);
            // stats sample time (taken from the first sample)
            long sampleTime = sampleList.get(0).getTime();
            /* step --> 3 compute port metrics, update the StoragePort objects and persist them */
            newstatsList = processPortStatsInfo(interPortMap, stringMapPortIOs, storageSystem, dbClient, sampleTime);
            // finally accumulate into the shared stats list
            statsList.addAll(newstatsList);
        }
        // calculate the avg port utilization per VDM and store in db
        portMetricsProcessor.dataMoverAvgPortMetrics(profile.getSystemId());
    } catch (final Exception ex) {
        _logger.error("Exception occurred while processing the volume stats response due to {}", ex.getMessage());
    } finally {
        // Always release the HTTP connection, success or failure.
        result.releaseConnection();
    }
}
Use of com.emc.storageos.db.client.model.Stat in the project coprhd-controller by CoprHD.
From the class VNXVolumeStatsProcessor, method processResult:
/**
 * Processes the VNX volume statistics query response.
 * <p>
 * Unmarshals the response, then feeds each {@link VolumeSetStats} entry's
 * sample list to {@code processVolumeSampleList} together with the
 * volume-to-fileshare map and the shared stats list from the key map.
 * Failures are logged and swallowed so one bad response does not abort the
 * collection cycle.
 *
 * @param operation the collection operation being processed (unused here)
 * @param resultObj the HTTP {@link PostMethod} carrying the response stream
 * @param keyMap    collection context; supplies the volume/fileshare map and
 *                  the shared {@code Stat} list
 * @throws BaseCollectionException declared by the interface; not thrown here
 */
@SuppressWarnings("unchecked")
@Override
public void processResult(Operation operation, Object resultObj, Map<String, Object> keyMap) throws BaseCollectionException {
    final PostMethod result = (PostMethod) resultObj;
    _logger.info("processing volumeStats response" + resultObj);
    try {
        ResponsePacket responsePacket = (ResponsePacket) _unmarshaller.unmarshal(result.getResponseBodyAsStream());
        Map<String, String> volumeToFileShare = (Map<String, String>) keyMap.get(VNXFileConstants.VOLFILESHAREMAP);
        List<Stat> collectedStats = (List<Stat>) keyMap.get(VNXFileConstants.STATS);
        // and proceed with rest of the Mover stats.
        for (Object entry : getQueryStatsResponse(responsePacket)) {
            VolumeSetStats volumeSetStats = (VolumeSetStats) entry;
            processVolumeSampleList(volumeSetStats.getSample(), keyMap, volumeToFileShare, collectedStats);
        }
    } catch (final Exception ex) {
        _logger.error("Exception occurred while processing the volume stats response due to {}", ex.getMessage());
    } finally {
        // Always release the HTTP connection, success or failure.
        result.releaseConnection();
    }
}
Use of com.emc.storageos.db.client.model.Stat in the project coprhd-controller by CoprHD.
From the class DummyDBClient, method queryTimeSeries:
// Test stub: simulates queryTimeSeries outcomes keyed off the requested time bucket.
// Each date string selects a scenario: success with synthetic records (Stat / Event /
// AuditLog), a retryable failure, a callback error, or a marshalling failure.
@Override
public <T extends DataPoint> void queryTimeSeries(Class<? extends TimeSeries> tsType, DateTime timeBucket, TimeBucket bucket, TimeSeriesQueryResult<T> callback, ExecutorService workerThreads) throws DatabaseException {
if (timeBucket != null) {
MicrosecondsClock clock = new MicrosecondsClock();
UUID uuid = TimeUUIDUtils.getTimeUUID(clock);
// For timeBucket 2012-01-01T00:00 we return 10 synthetic Stat records.
if (timeBucket.toString().contains("2012-01-01T00:00")) {
try {
for (int i = 0; i < 10; i++) {
Stat st = new Stat();
st.setProject(new URI("http://project" + i));
st.setTenant(new URI("http://t." + i));
st.setUser(new URI("http://u." + i));
st.setVirtualPool(new URI("http://vpool.gold" + i));
callback.data((T) st, TimeUUIDUtils.getTimeFromUUID(uuid));
}
} catch (URISyntaxException e) {
_logger.error(e.getMessage(), e);
}
callback.done();
} else if (timeBucket.toString().contains("2012-01-02T00:00")) {
// Simulates a retryable database failure.
throw DatabaseException.retryables.dummyClientFailed();
} else if (timeBucket.toString().contains("2012-01-03T00:00")) {
// Simulates an error reported through the callback.
callback.error(null);
} else if (timeBucket.toString().contains("2012-01-04T00:00")) {
try {
// For timeBucket 2012-01-04T00:00 we return 10 synthetic Event records.
for (int i = 0; i < 10; i++) {
Event evt = new Event();
evt.setProjectId(new URI("http://project" + i));
evt.setEventId(String.valueOf(i));
evt.setTenantId(new URI("http://t." + i));
evt.setUserId(new URI("http://u." + i));
evt.setVirtualPool(new URI("http://vpool.gold" + i));
callback.data((T) evt, TimeUUIDUtils.getTimeFromUUID(uuid));
}
} catch (URISyntaxException e) {
_logger.error(e.getMessage(), e);
}
callback.done();
} else if (timeBucket.toString().contains("2012-01-05T00:00")) {
// Simulates a marshalling failure; the exception is caught and only logged.
try {
throw new MarshallingExcetion("marshalling Exception", null);
} catch (MarshallingExcetion e) {
_logger.error(e.getMessage(), e);
}
} else if (timeBucket.toString().contains("2012-01-06T00:00")) {
// Simulates an error reported through the callback.
callback.error(null);
} else if (timeBucket.toString().contains("2012-01-07T00:00")) {
try {
// For timeBucket 2012-01-07T00:00 we return 10 synthetic AuditLog records.
for (int i = 0; i < 10; i++) {
AuditLog log = new AuditLog();
log.setProductId("productId." + i);
log.setTenantId(new URI("http://tenant." + i));
log.setUserId(new URI("http://user." + i));
log.setServiceType("serviceType" + i);
log.setAuditType("auditType" + i);
log.setDescription("description" + i);
callback.data((T) log, TimeUUIDUtils.getTimeFromUUID(uuid));
}
} catch (URISyntaxException e) {
_logger.error(e.getMessage(), e);
}
callback.done();
} else if (timeBucket.toString().contains("2012-01-08T00:00")) {
// Simulates a marshalling failure; the exception is caught and only logged.
try {
throw new MarshallingExcetion("marshalling Exception", null);
} catch (MarshallingExcetion e) {
_logger.error(e.getMessage(), e);
}
}
}
}
Aggregations