Example 41 with Statistics

Use of org.apache.geode.Statistics in project geode by apache.

The class PartitionedRegionStatsJUnitTest, method validateStats.

/**
 * Verifies that PR statistics work properly for a PartitionedRegion. The putsCompleted,
 * getsCompleted, createsCompleted, destroysCompleted, containsKeyCompleted,
 * containsValueForKeyCompleted, invalidatesCompleted and totalBucketSize statistics are
 * validated here; the redundant-copies statistics (avgRedundantCopies, maxRedundantCopies
 * and minRedundantCopies) are temporarily commented out.
 */
private void validateStats(PartitionedRegion pr) throws Exception {
    Statistics stats = pr.getPrStats().getStats();
    int bucketCount = stats.get("bucketCount").intValue();
    int putsCompleted = stats.get("putsCompleted").intValue();
    int totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(0, bucketCount);
    assertEquals(0, putsCompleted);
    assertEquals(0, totalBucketSize);
    int totalGets = 0;
    final int bucketMax = pr.getTotalNumberOfBuckets();
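    // Put bucketMax + 1 entries (keys 0..bucketMax) so that every bucket gets created;
    // the assertions below expect bucketCount to equal the configured total number of buckets.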
    for (int i = 0; i < bucketMax + 1; i++) {
        Long val = new Long(i);
        try {
            pr.put(val, val);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 0; i < bucketMax + 1; i++) {
        Long val = new Long(i);
        try {
            pr.get(val);
            totalGets++;
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    bucketCount = stats.get("bucketCount").intValue();
    putsCompleted = stats.get("putsCompleted").intValue();
    totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(bucketMax, bucketCount);
    assertEquals(bucketMax + 1, putsCompleted);
    assertEquals(bucketMax + 1, totalBucketSize);
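    // Destroy a single entry: destroysCompleted should become 1 and the entry count drop back to bucketMax.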
    pr.destroy(new Long(bucketMax));
    putsCompleted = stats.get("putsCompleted").intValue();
    totalBucketSize = stats.get("dataStoreEntryCount").intValue();
    assertEquals(bucketMax, bucketCount);
    assertEquals(bucketMax + 1, putsCompleted);
    assertEquals(bucketMax, totalBucketSize);
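    // Exercise create, get, containsKey, containsValueForKey and invalidate on a separate key range (200..209).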
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        String val = "" + i;
        try {
            pr.create(key, val);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.get(key);
            totalGets++;
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.containsKey(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.containsValueForKey(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    for (int i = 200; i < 210; i++) {
        Long key = new Long(i);
        try {
            pr.invalidate(key);
        } catch (PartitionedRegionStorageException ex) {
            this.logger.warning(ex);
        }
    }
    int getsCompleted = stats.get("getsCompleted").intValue();
    int createsCompleted = stats.get("createsCompleted").intValue();
    int containsKeyCompleted = stats.get("containsKeyCompleted").intValue();
    int containsValueForKeyCompleted = stats.get("containsValueForKeyCompleted").intValue();
    int invalidatesCompleted = stats.get("invalidatesCompleted").intValue();
    int destroysCompleted = stats.get("destroysCompleted").intValue();
    assertEquals(totalGets, getsCompleted);
    assertEquals(10, createsCompleted);
    assertEquals(10, containsKeyCompleted);
    assertEquals(10, containsValueForKeyCompleted);
    assertEquals(10, invalidatesCompleted);
    assertEquals(1, destroysCompleted);
    // Redundant copies related statistics (temporarily commented out):
    /*
     * int maxRedundantCopies = stats.get("maxRedundantCopies").intValue();
     * int minRedundantCopies = stats.get("minRedundantCopies").intValue();
     * int avgRedundantCopies = stats.get("avgRedundantCopies").intValue();
     *
     * assertEquals(2, minRedundantCopies);
     * assertEquals(2, maxRedundantCopies);
     * assertEquals(2, avgRedundantCopies);
     */
}
Also used : PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) Statistics(org.apache.geode.Statistics)
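
The pattern exercised above is simply reading raw counter values by descriptor name from the Statistics instance that backs PartitionedRegionStats. A minimal sketch of that read path, assuming access to the internal PartitionedRegion API used by the test (the helper class and method names here are illustrative, not part of Geode):

import org.apache.geode.Statistics;
import org.apache.geode.internal.cache.PartitionedRegion;

final class PrOpCounterDump {
    // Prints a few of the operation counters validated in the test above.
    static void dump(PartitionedRegion pr) {
        Statistics stats = pr.getPrStats().getStats();
        System.out.println("putsCompleted       = " + stats.get("putsCompleted").intValue());
        System.out.println("getsCompleted       = " + stats.get("getsCompleted").intValue());
        System.out.println("destroysCompleted   = " + stats.get("destroysCompleted").intValue());
        System.out.println("dataStoreEntryCount = " + stats.get("dataStoreEntryCount").intValue());
    }
}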

Example 42 with Statistics

Use of org.apache.geode.Statistics in project geode by apache.

The class PartitionedRegionStatsDUnitTest, method testClose.

////////// test methods ////////////////
@Test
public void testClose() throws Exception {
    final Host host = Host.getHost(0);
    final VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    final VM vm2 = host.getVM(2);
    final VM vm3 = host.getVM(3);
    CacheSerializableRunnable createPR = new CacheSerializableRunnable("createPrRegions") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            cache.createRegion(PR_PREFIX, createRegionAttributesForPR(REDUNDANT_COPIES, 200));
            cache.createRegion(PR_PREFIX + "1", createRegionAttributesForPR(REDUNDANT_COPIES, 200));
        }
    };
    /**
     * This class creates accessors.
     */
    CacheSerializableRunnable createAccessor = new CacheSerializableRunnable("createAccessor") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            cache.createRegion(PR_PREFIX, createRegionAttributesForPR(REDUNDANT_COPIES, 0));
            cache.createRegion(PR_PREFIX + "1", createRegionAttributesForPR(REDUNDANT_COPIES, 0));
        }
    };
    /**
     * This class does put, get, create, destroy, invalidate, containsKey and containsValueForKey
     * operations on PartitionedRegion.
     */
    CacheSerializableRunnable doRegionOps = new CacheSerializableRunnable("doRegionOps") {

        public void run2() {
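            // Intentionally empty: the driver below invokes doOps(Integer) by name via vm.invoke(runnable, "doOps", args).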
        }

        public void doOps(Integer opTypeInteger) throws CacheException {
            Cache cache = getCache();
            int opType = opTypeInteger.intValue();
            Region pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull(pr);
            doRegionOpsOnPR(pr, opType);
            pr = cache.getRegion(Region.SEPARATOR + PR_PREFIX + "1");
            assertNotNull(pr);
            doRegionOpsOnPR(pr, opType);
        }

        private void doRegionOpsOnPR(Region pr, int opType) throws CacheException {
            switch(opType) {
                case PUT:
                    for (int k = 0; k < cnt; k++) {
                        pr.put("" + k, "" + k);
                    }
                    break;
                case GET:
                    for (int k = 0; k < cnt; k++) {
                        pr.get("" + k);
                    }
                    break;
                case CONTAINS_KEY:
                    for (int k = 0; k < cnt; k++) {
                        pr.containsKey("" + k);
                    }
                    break;
                case CONTAINS_VALUE_FOR_KEY:
                    for (int k = 0; k < cnt; k++) {
                        pr.containsValueForKey("" + k);
                    }
                    break;
                case INVALIDATE:
                    for (int k = 0; k < cnt; k++) {
                        pr.invalidate("" + k);
                    }
                    break;
                case DESTROY:
                    for (int k = 0; k < cnt; k++) {
                        pr.destroy("" + k);
                    }
                    break;
                case CREATE:
                    for (int k = 0; k < cnt; k++) {
                        pr.create("1" + k, "1" + k);
                    }
                    break;
                case GET_ENTRY:
                    for (int k = 0; k < cnt; k++) {
                        pr.getEntry(Integer.toString(k));
                    }
            }
        }
    };
    /**
     * This class disconnects the VM from DistributedSystem.
     */
    // TODO bug36296
    CacheSerializableRunnable disconnectVM = new CacheSerializableRunnable("disconnectVM") {

        public void run2() {
            Cache cache = getCache();
            DistributedSystem ds = cache.getDistributedSystem();
            ds.disconnect();
        }
    };
    /**
     * This class validates the min, max and avg Redundant Copies PR Statistics before disconnecting
     * VM.
     */
    CacheSerializableRunnable validateRedundantCopiesStats = new CacheSerializableRunnable("validateStats") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull(pr);
            Statistics stats = pr.getPrStats().getStats();
            int minRedundantCopies = stats.get("minRedundantCopies").intValue();
            int maxRedundantCopies = stats.get("maxRedundantCopies").intValue();
            int avgRedundantCopies = stats.get("avgRedundantCopies").intValue();
            assertEquals(1, minRedundantCopies);
            assertEquals(1, maxRedundantCopies);
            assertEquals(1, avgRedundantCopies);
        }
    };
    /**
     * This class validates the min, max and avg redundant copies PR Statistics after disconnecting
     * VM.
     */
    CacheSerializableRunnable validateRedundantCopiesStatsAfterDisconnect = new CacheSerializableRunnable("validateRedundantCopiesStatsAfterDisconnect") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull(pr);
            Statistics stats = pr.getPrStats().getStats();
            int minRedundantCopies = stats.get("minRedundantCopies").intValue();
            int maxRedundantCopies = stats.get("maxRedundantCopies").intValue();
            int avgRedundantCopies = stats.get("avgRedundantCopies").intValue();
            assertEquals(1, minRedundantCopies);
            assertEquals(1, maxRedundantCopies);
            assertEquals(1, avgRedundantCopies);
        }
    };
    /**
     * This class validates PartitionedRegion operations related statistics. PRStatistics for put,
     * get, create, invalidate, containsKey, containsValueForKey and destroys are validated.
     */
    CacheSerializableRunnable validatePartitionedRegionOpsStats = new CacheSerializableRunnable("validatePartitionedRegionOpsStats") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull(pr);
            pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull(pr);
            Statistics stats = pr.getPrStats().getStats();
            int putsCompleted = stats.get("putsCompleted").intValue();
            int getsCompleted = stats.get("getsCompleted").intValue();
            int getEntrysCompleted = stats.get("getEntryCompleted").intValue();
            int createsCompleted = stats.get("createsCompleted").intValue();
            int containsKeyCompleted = stats.get("containsKeyCompleted").intValue();
            int containsValueForKeyCompleted = stats.get("containsValueForKeyCompleted").intValue();
            int invalidatesCompleted = stats.get("invalidatesCompleted").intValue();
            int destroysCompleted = stats.get("destroysCompleted").intValue();
            assertEquals(cnt, putsCompleted);
            assertEquals(cnt, getsCompleted);
            assertEquals(cnt, getEntrysCompleted);
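            // createsCompleted stays 0 because the CREATE branch of doOps is never invoked in this test run.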
            assertEquals(0, createsCompleted);
            assertEquals(cnt, containsKeyCompleted);
            assertEquals(cnt, containsValueForKeyCompleted);
            assertEquals(cnt, invalidatesCompleted);
            assertEquals(cnt, destroysCompleted);
        // TODO: Need to validate that bucket stats add up....
        // pr.getDataStore().getCachePerfStats().getGets(); // etc...
        }
    };
    // Create PRs on 3 VMs and accessors on 1 VM
    vm0.invoke(createPR);
    vm1.invoke(createPR);
    vm2.invoke(createPR);
    vm3.invoke(createAccessor);
    // Do Region operations.
    Object[] put = { new Integer(PUT) };
    vm0.invoke(doRegionOps, "doOps", put);
    Object[] get = { new Integer(GET) };
    vm0.invoke(doRegionOps, "doOps", get);
    Object[] getEntry = { new Integer(GET_ENTRY) };
    vm0.invoke(doRegionOps, "doOps", getEntry);
    Object[] containsKey = { new Integer(CONTAINS_KEY) };
    vm0.invoke(doRegionOps, "doOps", containsKey);
    Object[] containsValueForKey = { new Integer(CONTAINS_VALUE_FOR_KEY) };
    vm0.invoke(doRegionOps, "doOps", containsValueForKey);
    Object[] invalidate = { new Integer(INVALIDATE) };
    vm0.invoke(doRegionOps, "doOps", invalidate);
    Object[] destroy = { new Integer(DESTROY) };
    vm0.invoke(doRegionOps, "doOps", destroy);
    vm0.invoke(validatePartitionedRegionOpsStats);
    CacheSerializableRunnable destroyRegion = new CacheSerializableRunnable("destroyRegion") {

        public void run2() throws CacheException {
            Cache cache = getCache();
            PartitionedRegion pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + "1");
            // Build the failure message from the region path; calling pr.getName() here would NPE if pr were null.
            assertNotNull("Region " + Region.SEPARATOR + PR_PREFIX + "1 should not be null", pr);
            pr.destroyRegion();
            pr = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX);
            assertNotNull("Region " + Region.SEPARATOR + PR_PREFIX + " should not be null", pr);
            pr.destroyRegion();
        }
    };
    vm0.invoke(destroyRegion);
    /*
     * Redundant copies related statistics validation:
     *
     * vm0.invoke(validateRedundantCopiesStats);
     * vm1.invoke(disconnectVM);
     * Thread.sleep(20000);
     * vm0.invoke(validateRedundantCopiesStatsAfterDisconnect);
     */
}
Also used : CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) VM(org.apache.geode.test.dunit.VM) Region(org.apache.geode.cache.Region) Host(org.apache.geode.test.dunit.Host) DistributedSystem(org.apache.geode.distributed.DistributedSystem) Statistics(org.apache.geode.Statistics) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
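
The validation runnable above only checks the first region even though doOps touched both regions; each PartitionedRegion carries its own PartitionedRegionStats, so the second region could be checked the same way. A minimal sketch of that additional check, written as a hypothetical extension of validatePartitionedRegionOpsStats.run2() and reusing only calls already present in it:

// Hypothetical extra assertions for the second region created in testClose().
PartitionedRegion pr1 = (PartitionedRegion) cache.getRegion(Region.SEPARATOR + PR_PREFIX + "1");
assertNotNull(pr1);
Statistics stats1 = pr1.getPrStats().getStats();
assertEquals(cnt, stats1.get("putsCompleted").intValue());
assertEquals(cnt, stats1.get("getsCompleted").intValue());
assertEquals(cnt, stats1.get("destroysCompleted").intValue());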

Example 43 with Statistics

Use of org.apache.geode.Statistics in project geode by apache.

The class FileSizeLimitIntegrationTest, method setUp.

@Before
public void setUp() throws Exception {
    this.dir = this.temporaryFolder.getRoot();
    this.archiveFileName = new File(this.dir, this.testName.getMethodName() + ".gfs").getAbsolutePath();
    this.factory = new LocalStatisticsFactory(null);
    this.statisticDescriptors = new StatisticDescriptor[] { this.factory.createIntCounter("stat1", "description of stat1", "units", true) };
    this.statisticsType = factory.createType("statisticsType1", "statisticsType1", this.statisticDescriptors);
    this.statistics = factory.createAtomicStatistics(this.statisticsType, "statistics1", 1);
    Answer<Statistics[]> statisticsAnswer = new Answer<Statistics[]>() {

        public Statistics[] answer(InvocationOnMock invocation) throws Throwable {
            return factory.getStatistics();
        }
    };
    Answer<Integer> modCountAnswer = new Answer<Integer>() {

        public Integer answer(InvocationOnMock invocation) throws Throwable {
            return factory.getStatListModCount();
        }
    };
    StatisticsSampler sampler = mock(StatisticsSampler.class);
    when(sampler.getStatistics()).thenAnswer(statisticsAnswer);
    when(sampler.getStatisticsModCount()).thenAnswer(modCountAnswer);
    StatArchiveHandlerConfig config = mock(StatArchiveHandlerConfig.class);
    when(config.getArchiveFileName()).thenReturn(new File(this.archiveFileName));
    when(config.getArchiveFileSizeLimit()).thenReturn(FILE_SIZE_LIMIT);
    when(config.getSystemId()).thenReturn(1L);
    when(config.getSystemStartTime()).thenReturn(System.currentTimeMillis());
    when(config.getSystemDirectoryPath()).thenReturn(this.temporaryFolder.getRoot().getAbsolutePath());
    when(config.getProductDescription()).thenReturn(this.testName.getMethodName());
    when(config.getArchiveDiskSpaceLimit()).thenReturn(0L);
    this.sampleCollector = new SampleCollector(sampler);
    this.sampleCollector.initialize(config, this.timer.getTime(), new MainWithChildrenRollingFileHandler());
    this.timer.reset();
    this.nanosTimeStamp = this.timer.getLastResetTime() - getNanoRate();
}
Also used : Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) MainWithChildrenRollingFileHandler(org.apache.geode.internal.io.MainWithChildrenRollingFileHandler) File(java.io.File) Statistics(org.apache.geode.Statistics) Before(org.junit.Before)
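
Once setUp() has run, exercising the statistic it registered is just a matter of bumping the counter and reading it back. A minimal sketch, assuming the atomic statistics instance starts at zero and that the name-based incInt/getInt overloads resolve against the "stat1" descriptor created above (this snippet is illustrative, not part of the original test):

// Illustrative use of the statistics created in setUp().
statistics.incInt("stat1", 1);
int value = statistics.getInt("stat1");
assertEquals(1, value);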

Example 44 with Statistics

Use of org.apache.geode.Statistics in project geode by apache.

The class GemFireStatSamplerIntegrationTest, method testSampleRate.

/**
   * Tests the statistics sample rate within an acceptable margin of error.
   */
@Test
public void testSampleRate() throws Exception {
    connect(createGemFireProperties());
    GemFireStatSampler statSampler = getGemFireStatSampler();
    assertTrue(statSampler.waitForInitialization(5000));
    assertEquals(STAT_SAMPLE_RATE, statSampler.getSampleRate());
    assertTrue(getStatisticsManager().getStatListModCount() > 0);
    List<Statistics> statistics = getStatisticsManager().getStatsList();
    assertNotNull(statistics);
    assertTrue(statistics.size() > 0);
    StatisticsType statSamplerType = getStatisticsManager().findType("StatSampler");
    Statistics[] statsArray = getStatisticsManager().findStatisticsByType(statSamplerType);
    assertEquals(1, statsArray.length);
    final Statistics statSamplerStats = statsArray[0];
    final int initialSampleCount = statSamplerStats.getInt("sampleCount");
    final int expectedSampleCount = initialSampleCount + 2;
    waitForExpectedStatValue(statSamplerStats, "sampleCount", expectedSampleCount, 5000, 10);
}
Also used : StatisticsType(org.apache.geode.StatisticsType) Statistics(org.apache.geode.Statistics) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
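
waitForExpectedStatValue is a test helper; its effect here is roughly a bounded poll of the sampler's own "sampleCount" statistic. A hedged sketch of an equivalent inline wait inside the test method, using only Statistics.getInt and the timeout and interval values passed above:

// Roughly what waitForExpectedStatValue(statSamplerStats, "sampleCount", expectedSampleCount, 5000, 10) does.
long deadline = System.currentTimeMillis() + 5000;
while (statSamplerStats.getInt("sampleCount") < expectedSampleCount
        && System.currentTimeMillis() < deadline) {
    Thread.sleep(10);
}
assertTrue(statSamplerStats.getInt("sampleCount") >= expectedSampleCount);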

Example 45 with Statistics

Use of org.apache.geode.Statistics in project geode by apache.

The class GemFireStatSamplerIntegrationTest, method testStop.

/**
   * Invokes stop() and then validates that the sampler did in fact stop.
   */
@Test
public void testStop() throws Exception {
    connect(createGemFireProperties());
    GemFireStatSampler statSampler = getGemFireStatSampler();
    assertTrue(statSampler.waitForInitialization(5000));
    // validate the stat sampler is running
    StatisticsType statSamplerType = getStatisticsManager().findType("StatSampler");
    Statistics[] statsArray = getStatisticsManager().findStatisticsByType(statSamplerType);
    assertEquals(1, statsArray.length);
    final Statistics statSamplerStats = statsArray[0];
    final int initialSampleCount = statSamplerStats.getInt("sampleCount");
    final int expectedSampleCount = initialSampleCount + 2;
    waitForStatSample(statSamplerStats, expectedSampleCount, 20000, 10);
    // stop the stat sampler
    statSampler.stop();
    // validate the stat sampler has stopped
    final int stoppedSampleCount = statSamplerStats.getInt("sampleCount");
    // the following should timeout rather than complete
    assertStatValueDoesNotChange(statSamplerStats, "sampleCount", stoppedSampleCount, 5000, 10);
    assertEquals(stoppedSampleCount, statSamplerStats.getInt("sampleCount"));
}
Also used : StatisticsType(org.apache.geode.StatisticsType) Statistics(org.apache.geode.Statistics) FlakyTest(org.apache.geode.test.junit.categories.FlakyTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Aggregations

Statistics (org.apache.geode.Statistics): 74
StatisticsType (org.apache.geode.StatisticsType): 36
Test (org.junit.Test): 34
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 29
StatisticDescriptor (org.apache.geode.StatisticDescriptor): 18
File (java.io.File): 17
ArrayList (java.util.ArrayList): 12
List (java.util.List): 12
StatValue (org.apache.geode.internal.statistics.StatArchiveReader.StatValue): 11
TestStatArchiveWriter (org.apache.geode.internal.statistics.TestStatArchiveWriter): 10
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList): 6
LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics): 6
HashMap (java.util.HashMap): 5
LinuxProcFsStatistics (org.apache.geode.internal.statistics.platform.LinuxProcFsStatistics): 5
Iterator (java.util.Iterator): 4
Map (java.util.Map): 4
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 4
MainWithChildrenRollingFileHandler (org.apache.geode.internal.io.MainWithChildrenRollingFileHandler): 3
Before (org.junit.Before): 3
IOException (java.io.IOException): 2