Use of org.apache.geode.internal.statistics.StatArchiveReader.ResourceInst in project geode by apache.
From the class StatUtils, method addResourceInstsToSet:
/**
 * Reads the given statistics archive and adds every {@link ResourceInst} that matches
 * the supplied spec string to the provided set.
 *
 * @param archiveFile the .gfs statistics archive to read
 * @param specString the stat specification used to match values in the archive
 * @param resourceInsts destination set; matching resource instances are added to it
 * @throws IOException if the archive file cannot be read
 */
private static void addResourceInstsToSet(final File archiveFile, final String specString, final Set<ResourceInst> resourceInsts) throws IOException {
  StatSpec spec = new StatSpec(specString);
  StatArchiveReader archiveReader = new StatArchiveReader(new File[] { archiveFile }, new StatSpec[] { spec }, true);
  for (StatValue matchedValue : archiveReader.matchSpec(spec)) {
    ResourceInst[] resources = matchedValue.getResources();
    for (int idx = 0; idx < resources.length; idx++) {
      resourceInsts.add(resources[idx]);
    }
  }
}
Use of org.apache.geode.internal.statistics.StatArchiveReader.ResourceInst in project geode by apache.
From the class StatisticsDistributedTest, method testPubAndSubCustomStats:
/**
 * End-to-end pub/sub statistics test across multiple VMs.
 *
 * NUM_PUBS publisher VMs put MAX_PUTS values each (from NUM_PUB_THREADS threads)
 * into a distributed-ack region while one subscriber VM counts update events via a
 * cache listener. Each member archives its custom PubSubStats to its own .gfs file.
 * The test then cross-checks three views of the same counts for consistency:
 * in-memory statistics, each member's archive file, and a combined read over all
 * archives matching this test's name.
 */
@Test
public void testPubAndSubCustomStats() throws Exception {
  String regionName = "region_" + getName();

  // One VM per publisher, plus one subscriber VM at index NUM_PUBS.
  VM[] pubs = new VM[NUM_PUBS];
  for (int pubVM = 0; pubVM < NUM_PUBS; pubVM++) {
    pubs[pubVM] = getHost(0).getVM(pubVM);
  }
  VM sub = getHost(0).getVM(NUM_PUBS);

  // Distinct archive file per member so each VM's samples can be read back separately.
  String subArchive = this.directory.getAbsolutePath() + File.separator + getName() + "_sub" + ".gfs";
  String[] pubArchives = new String[NUM_PUBS];
  for (int pubVM = 0; pubVM < NUM_PUBS; pubVM++) {
    pubArchives[pubVM] = this.directory.getAbsolutePath() + File.separator + getName() + "_pub-" + pubVM + ".gfs";
  }

  // Phase 1: each publisher VM connects with archiving enabled, creates the region,
  // and pre-creates the keys (subscriber's listener will then see puts as updates).
  for (int i = 0; i < NUM_PUBS; i++) {
    final int pubVM = i;
    pubs[pubVM].invoke("pub-connect-and-create-data-" + pubVM, () -> {
      Properties props = new Properties();
      props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
      props.setProperty(STATISTIC_SAMPLE_RATE, "1000");
      props.setProperty(STATISTIC_ARCHIVE_FILE, pubArchives[pubVM]);
      InternalDistributedSystem system = getSystem(props);
      // assert that sampler is working as expected
      GemFireStatSampler sampler = system.getStatSampler();
      assertTrue(sampler.isSamplingEnabled());
      assertTrue(sampler.isAlive());
      assertEquals(new File(pubArchives[pubVM]), sampler.getArchiveFileName());
      await("awaiting SampleCollector to exist").atMost(30, SECONDS).until(() -> sampler.getSampleCollector() != null);
      SampleCollector sampleCollector = sampler.getSampleCollector();
      assertNotNull(sampleCollector);
      StatArchiveHandler archiveHandler = sampleCollector.getStatArchiveHandler();
      assertNotNull(archiveHandler);
      assertTrue(archiveHandler.isArchiving());
      // create cache and region
      Cache cache = getCache();
      RegionFactory<String, Number> factory = cache.createRegionFactory();
      factory.setScope(Scope.DISTRIBUTED_ACK);
      // membership listener lets the put threads later wait until the sub has joined
      RegionMembershipListener rml = new RegionMembershipListener();
      rmlRef.set(rml);
      factory.addCacheListener(rml);
      Region<String, Number> region = factory.create(regionName);
      // create the keys
      if (region.getAttributes().getScope() == Scope.DISTRIBUTED_ACK) {
        for (int key = 0; key < NUM_KEYS; key++) {
          region.create("KEY-" + key, null);
        }
      }
    });
  }

  // Phase 2: subscriber VM connects with archiving enabled and registers an
  // UpdateListener that records update events into its PubSubStats instance.
  DistributedMember subMember = sub.invoke("sub-connect-and-create-keys", () -> {
    Properties props = new Properties();
    props.setProperty(STATISTIC_SAMPLING_ENABLED, "true");
    props.setProperty(STATISTIC_SAMPLE_RATE, "1000");
    props.setProperty(STATISTIC_ARCHIVE_FILE, subArchive);
    InternalDistributedSystem system = getSystem(props);
    PubSubStats statistics = new PubSubStats(system, "sub-1", 1);
    subStatsRef.set(statistics);
    // assert that sampler is working as expected
    GemFireStatSampler sampler = system.getStatSampler();
    assertTrue(sampler.isSamplingEnabled());
    assertTrue(sampler.isAlive());
    assertEquals(new File(subArchive), sampler.getArchiveFileName());
    await("awaiting SampleCollector to exist").atMost(30, SECONDS).until(() -> sampler.getSampleCollector() != null);
    SampleCollector sampleCollector = sampler.getSampleCollector();
    assertNotNull(sampleCollector);
    StatArchiveHandler archiveHandler = sampleCollector.getStatArchiveHandler();
    assertNotNull(archiveHandler);
    assertTrue(archiveHandler.isArchiving());
    // create cache and region with UpdateListener
    Cache cache = getCache();
    RegionFactory<String, Number> factory = cache.createRegionFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    CacheListener<String, Number> cl = new UpdateListener(statistics);
    factory.addCacheListener(cl);
    Region<String, Number> region = factory.create(regionName);
    // create the keys
    if (region.getAttributes().getScope() == Scope.DISTRIBUTED_ACK) {
      for (int key = 0; key < NUM_KEYS; key++) {
        region.create("KEY-" + key, null);
      }
    }
    // no puts have happened yet, so the listener must not have fired
    assertEquals(0, statistics.getUpdateEvents());
    return system.getDistributedMember();
  });

  // Phase 3: each publisher VM runs NUM_PUB_THREADS async put threads; every thread
  // performs MAX_PUTS puts, timed through PubSubStats.startPut/endPut.
  for (int i = 0; i < NUM_PUBS; i++) {
    final int pubVM = i;
    AsyncInvocation[] publishers = new AsyncInvocation[NUM_PUB_THREADS];
    for (int j = 0; j < NUM_PUB_THREADS; j++) {
      final int pubThread = j;
      publishers[pubThread] = pubs[pubVM].invokeAsync("pub-connect-and-put-data-" + pubVM + "-thread-" + pubThread, () -> {
        PubSubStats statistics = new PubSubStats(basicGetSystem(), "pub-" + pubThread, pubVM);
        pubStatsRef.set(pubThread, statistics);
        RegionMembershipListener rml = rmlRef.get();
        Region<String, Number> region = getCache().getRegion(regionName);
        // assert that sub is in rml membership
        assertNotNull(rml);
        await("awaiting Membership to contain subMember").atMost(30, SECONDS).until(() -> rml.contains(subMember) && rml.size() == NUM_PUBS);
        // publish lots of puts cycling through the NUM_KEYS
        assertEquals(0, statistics.getPuts());
        // cycle through the keys randomly
        if (RANDOMIZE_PUTS) {
          Random randomGenerator = new Random();
          int key = 0;
          for (int idx = 0; idx < MAX_PUTS; idx++) {
            long start = statistics.startPut();
            key = randomGenerator.nextInt(NUM_KEYS);
            region.put("KEY-" + key, idx);
            statistics.endPut(start);
          }
        // cycle through the keys in order and wrapping back around
        } else {
          int key = 0;
          for (int idx = 0; idx < MAX_PUTS; idx++) {
            long start = statistics.startPut();
            region.put("KEY-" + key, idx);
            // cycle through the keys...
            key++;
            if (key >= NUM_KEYS) {
              key = 0;
            }
            statistics.endPut(start);
          }
        }
        assertEquals(MAX_PUTS, statistics.getPuts());
        // wait for 2 samples to ensure all stats have been archived
        StatisticsType statSamplerType = getSystem().findType("StatSampler");
        Statistics[] statsArray = getSystem().findStatisticsByType(statSamplerType);
        assertEquals(1, statsArray.length);
        Statistics statSamplerStats = statsArray[0];
        int initialSampleCount = statSamplerStats.getInt(StatSamplerStats.SAMPLE_COUNT);
        await("awaiting sampleCount >= 2").atMost(30, SECONDS).until(() -> statSamplerStats.getInt(StatSamplerStats.SAMPLE_COUNT) >= initialSampleCount + 2);
      });
    }
    // join all put threads for this VM and surface any async failure in this test
    for (int pubThread = 0; pubThread < publishers.length; pubThread++) {
      publishers[pubThread].join();
      if (publishers[pubThread].exceptionOccurred()) {
        fail("Test failed", publishers[pubThread].getException());
      }
    }
  }

  // Phase 4: subscriber waits for two more samples, then publishes its total update
  // count to a static so it can be fetched from the controller VM below.
  sub.invoke("sub-wait-for-samples", () -> {
    // wait for 2 samples to ensure all stats have been archived
    StatisticsType statSamplerType = getSystem().findType("StatSampler");
    Statistics[] statsArray = getSystem().findStatisticsByType(statSamplerType);
    assertEquals(1, statsArray.length);
    Statistics statSamplerStats = statsArray[0];
    int initialSampleCount = statSamplerStats.getInt(StatSamplerStats.SAMPLE_COUNT);
    await("awaiting sampleCount >= 2").atMost(30, SECONDS).until(() -> statSamplerStats.getInt(StatSamplerStats.SAMPLE_COUNT) >= initialSampleCount + 2);
    // now post total updateEvents to static
    PubSubStats statistics = subStatsRef.get();
    assertNotNull(statistics);
    updateEvents.set(statistics.getUpdateEvents());
  });

  // validate pub values against sub values
  int totalUpdateEvents = sub.invoke(() -> getUpdateEvents());

  // Phase 5: per-publisher validation — in-memory put counts must match what was
  // written to that VM's archive file.
  // validate pub values against pub statistics against pub archive
  for (int i = 0; i < NUM_PUBS; i++) {
    final int pubIdx = i;
    pubs[pubIdx].invoke("pub-validation", () -> {
      // add up all the puts
      assertEquals(NUM_PUB_THREADS, pubStatsRef.length());
      int totalPuts = 0;
      for (int pubThreadIdx = 0; pubThreadIdx < NUM_PUB_THREADS; pubThreadIdx++) {
        PubSubStats statistics = pubStatsRef.get(pubThreadIdx);
        assertNotNull(statistics);
        totalPuts += statistics.getPuts();
      }
      // assert that total puts adds up to max puts times num threads
      assertEquals(MAX_PUTS * NUM_PUB_THREADS, totalPuts);
      // assert that archive file contains same values as statistics
      File archive = new File(pubArchives[pubIdx]);
      assertTrue(archive.exists());
      StatArchiveReader reader = new StatArchiveReader(new File[] { archive }, null, false);
      double combinedPuts = 0;
      List resources = reader.getResourceInstList();
      assertNotNull(resources);
      assertFalse(resources.isEmpty());
      for (Iterator<ResourceInst> iter = resources.iterator(); iter.hasNext(); ) {
        ResourceInst ri = iter.next();
        // only our custom PubSubStats resources matter here
        if (!ri.getType().getName().equals(PubSubStats.TYPE_NAME)) {
          continue;
        }
        StatValue[] statValues = ri.getStatValues();
        for (int idx = 0; idx < statValues.length; idx++) {
          String statName = ri.getType().getStats()[idx].getName();
          assertNotNull(statName);
          if (statName.equals(PubSubStats.PUTS)) {
            StatValue sv = statValues[idx];
            sv.setFilter(StatValue.FILTER_NONE);
            // NOTE(review): min, maxMinusMin and stdDev are computed but never
            // asserted; only mostRecent/max/mean are actually checked below.
            double mostRecent = sv.getSnapshotsMostRecent();
            double min = sv.getSnapshotsMinimum();
            double max = sv.getSnapshotsMaximum();
            double maxMinusMin = sv.getSnapshotsMaximum() - sv.getSnapshotsMinimum();
            double mean = sv.getSnapshotsAverage();
            double stdDev = sv.getSnapshotsStandardDeviation();
            // puts only ever increases, so the latest snapshot is the maximum
            assertEquals(mostRecent, max, 0f);
            double summation = 0;
            double[] rawSnapshots = sv.getRawSnapshots();
            for (int j = 0; j < rawSnapshots.length; j++) {
              summation += rawSnapshots[j];
            }
            assertEquals(mean, summation / sv.getSnapshotsSize(), 0);
            combinedPuts += mostRecent;
          }
        }
      }
      // assert that sum of mostRecent values for all puts equals totalPuts
      assertEquals((double) totalPuts, combinedPuts, 0);
      puts.getAndAdd(totalPuts);
    });
  }

  // validate pub values against sub values
  int totalCombinedPuts = 0;
  for (int i = 0; i < NUM_PUBS; i++) {
    int pubIdx = i;
    int totalPuts = pubs[pubIdx].invoke(() -> getPuts());
    assertEquals(MAX_PUTS * NUM_PUB_THREADS, totalPuts);
    totalCombinedPuts += totalPuts;
  }
  // every put on a pre-created key should have produced exactly one update event on the sub
  assertEquals(totalCombinedPuts, totalUpdateEvents);
  assertEquals(MAX_PUTS * NUM_PUB_THREADS * NUM_PUBS, totalCombinedPuts);

  // Phase 6: subscriber validation — in-memory update-event count must match the
  // subscriber's archive file.
  // validate sub values against sub statistics against sub archive
  final int totalPuts = totalCombinedPuts;
  sub.invoke("sub-validation", () -> {
    PubSubStats statistics = subStatsRef.get();
    assertNotNull(statistics);
    int updateEvents = statistics.getUpdateEvents();
    assertEquals(totalPuts, updateEvents);
    assertEquals(totalUpdateEvents, updateEvents);
    assertEquals(MAX_PUTS * NUM_PUB_THREADS * NUM_PUBS, updateEvents);
    // assert that archive file contains same values as statistics
    File archive = new File(subArchive);
    assertTrue(archive.exists());
    StatArchiveReader reader = new StatArchiveReader(new File[] { archive }, null, false);
    double combinedUpdateEvents = 0;
    List resources = reader.getResourceInstList();
    for (Iterator<ResourceInst> iter = resources.iterator(); iter.hasNext(); ) {
      ResourceInst ri = iter.next();
      if (!ri.getType().getName().equals(PubSubStats.TYPE_NAME)) {
        continue;
      }
      StatValue[] statValues = ri.getStatValues();
      for (int i = 0; i < statValues.length; i++) {
        String statName = ri.getType().getStats()[i].getName();
        assertNotNull(statName);
        if (statName.equals(PubSubStats.UPDATE_EVENTS)) {
          StatValue sv = statValues[i];
          sv.setFilter(StatValue.FILTER_NONE);
          // NOTE(review): min, maxMinusMin and stdDev are computed but never asserted
          double mostRecent = sv.getSnapshotsMostRecent();
          double min = sv.getSnapshotsMinimum();
          double max = sv.getSnapshotsMaximum();
          double maxMinusMin = sv.getSnapshotsMaximum() - sv.getSnapshotsMinimum();
          double mean = sv.getSnapshotsAverage();
          double stdDev = sv.getSnapshotsStandardDeviation();
          // updateEvents only ever increases, so the latest snapshot is the maximum
          assertEquals(mostRecent, max, 0);
          double summation = 0;
          double[] rawSnapshots = sv.getRawSnapshots();
          for (int j = 0; j < rawSnapshots.length; j++) {
            summation += rawSnapshots[j];
          }
          assertEquals(mean, summation / sv.getSnapshotsSize(), 0);
          combinedUpdateEvents += mostRecent;
        }
      }
    }
    assertEquals((double) totalUpdateEvents, combinedUpdateEvents, 0);
  });

  // Phase 7: re-read the same counts through the readIntStat helper as an
  // independent check on each archive file.
  int updateEvents = sub.invoke(() -> readIntStat(new File(subArchive), "PubSubStats", "updateEvents"));
  assertTrue(updateEvents > 0);
  assertEquals(MAX_PUTS * NUM_PUB_THREADS * NUM_PUBS, updateEvents);

  int puts = 0;
  for (int pubVM = 0; pubVM < NUM_PUBS; pubVM++) {
    int currentPubVM = pubVM;
    int vmPuts = pubs[pubVM].invoke(() -> readIntStat(new File(pubArchives[currentPubVM]), "PubSubStats", "puts"));
    assertTrue(vmPuts > 0);
    assertEquals(MAX_PUTS * NUM_PUB_THREADS, vmPuts);
    puts += vmPuts;
  }
  assertTrue(puts > 0);
  assertEquals(MAX_PUTS * NUM_PUB_THREADS * NUM_PUBS, puts);

  // Phase 8: combined read across every archive this test produced; total puts
  // must equal total update events.
  // use regex "testPubAndSubCustomStats"
  MultipleArchiveReader reader = new MultipleArchiveReader(this.directory, ".*" + getTestMethodName() + ".*\\.gfs");
  int combinedUpdateEvents = reader.readIntStat(PubSubStats.TYPE_NAME, PubSubStats.UPDATE_EVENTS);
  assertTrue("Failed to read updateEvents stat values", combinedUpdateEvents > 0);
  int combinedPuts = reader.readIntStat(PubSubStats.TYPE_NAME, PubSubStats.PUTS);
  assertTrue("Failed to read puts stat values", combinedPuts > 0);
  assertTrue("updateEvents is " + combinedUpdateEvents + " but puts is " + combinedPuts, combinedUpdateEvents == combinedPuts);
}
Use of org.apache.geode.internal.statistics.StatArchiveReader.ResourceInst in project geode by apache.
From the class StatArchiveWithConsecutiveResourceInstIntegrationTest, method readingFourActiveCacheClientUpdaterStatsWithReaderMatchSpec:
/**
 * Reads the fixture archive with the configured stat spec and verifies that the
 * matched stat values collapse to exactly two distinct resource instances.
 */
@Test
public void readingFourActiveCacheClientUpdaterStatsWithReaderMatchSpec() throws Exception {
  StatArchiveReader archiveReader = new StatArchiveReader(new File[] { this.archiveFile }, new StatSpec[] { this.statSpec }, true);
  StatValue[] matchedValues = archiveReader.matchSpec(this.statSpec);
  // collect into a set so duplicate resource instances across values count once
  Set<ResourceInst> distinctResources = new HashSet<>();
  for (StatValue matchedValue : matchedValues) {
    for (ResourceInst resource : matchedValue.getResources()) {
      distinctResources.add(resource);
    }
  }
  assertThat(distinctResources.size()).isEqualTo(2);
}
Aggregations