Search in sources:

Example 1 with StatisticsFactory

Use of org.apache.geode.StatisticsFactory in project geode by apache.

From class DLockService, method getOrCreateStats:

/** Get or create static dlock stats */
protected static synchronized DistributedLockStats getOrCreateStats(DistributedSystem ds) {
    if (stats == DUMMY_STATS) {
        Assert.assertTrue(ds != null, "Need an instance of InternalDistributedSystem");
        // DistributedSystem implements StatisticsFactory, so the system itself
        // serves as the factory.
        StatisticsFactory statFactory = ds;
        // Key the stats instance by OS process id.
        long statId = OSProcess.getId();
        stats = new DLockStats(statFactory, statId);
    }
    return stats;
}
Also used: StatisticsFactory (org.apache.geode.StatisticsFactory)
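
The assignment compiles because DistributedSystem implements StatisticsFactory. As a minimal sketch of what such a factory is used for, the following registers a statistics type and creates an instance keyed by the process id, mirroring getOrCreateStats above; the type and descriptor names are illustrative assumptions, not taken from DLockStats:

import org.apache.geode.StatisticDescriptor;
import org.apache.geode.Statistics;
import org.apache.geode.StatisticsFactory;
import org.apache.geode.StatisticsType;
import org.apache.geode.distributed.DistributedSystem;
import org.apache.geode.internal.OSProcess;

public class DLockStatsSketch {
    public static Statistics createStats(DistributedSystem ds) {
        // DistributedSystem implements StatisticsFactory, so the connected
        // system itself serves as the factory.
        StatisticsFactory factory = ds;
        // Describe each statistic the type will hold (names are illustrative).
        StatisticDescriptor[] descriptors = new StatisticDescriptor[] {
            factory.createLongCounter("lockWaitsCompleted", "Completed lock waits", "operations")
        };
        StatisticsType type = factory.createType("SketchDLockStats",
            "Illustrative distributed-lock statistics", descriptors);
        // Key the instance by OS process id, as the example above does.
        return factory.createAtomicStatistics(type, "dlockStats", OSProcess.getId());
    }
}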

Example 2 with StatisticsFactory

Use of org.apache.geode.StatisticsFactory in project geode by apache.

From class DiskInitFileJUnitTest, method testKrfIds:

@Test
public void testKrfIds() {
    // create a mock statistics factory for creating directory holders
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
        }
    });
    // Add a mock region to the init file so it doesn't
    // delete the file when the init file is closed
    final DiskRegionView drv = context.mock(DiskRegionView.class);
    context.checking(new Expectations() {

        {
            ignoring(drv);
        }
    });
    // Create a mock disk store impl. All we need to do is return
    // this init file directory.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    context.checking(new Expectations() {

        {
            allowing(parent).getInfoFileDir();
            will(returnValue(new DirectoryHolder(sf, testDirectory, 0, 0)));
            ignoring(parent);
        }
    });
    // Create the init file and record krf ids 1 and 2.
    DiskInitFile dif = new DiskInitFile("testKrfIds", parent, false, Collections.<File>emptySet());
    assertEquals(false, dif.hasKrf(1));
    dif.cmnKrfCreate(1);
    assertEquals(true, dif.hasKrf(1));
    assertEquals(false, dif.hasKrf(2));
    dif.cmnKrfCreate(2);
    assertEquals(true, dif.hasKrf(2));
    dif.createRegion(drv);
    dif.forceCompaction();
    dif.close();
    // Reopen the init file in recovery mode; the krf ids should be recovered.
    dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
    assertEquals(true, dif.hasKrf(1));
    assertEquals(true, dif.hasKrf(2));
    // Deleting a crf also drops the matching krf id.
    dif.cmnCrfDelete(1);
    assertEquals(false, dif.hasKrf(1));
    assertEquals(true, dif.hasKrf(2));
    dif.cmnCrfDelete(2);
    assertEquals(false, dif.hasKrf(2));
    dif.createRegion(drv);
    dif.forceCompaction();
    dif.close();
    // Reopen once more; both krf ids should now be gone.
    dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
    assertEquals(false, dif.hasKrf(1));
    assertEquals(false, dif.hasKrf(2));
    dif.destroy();
}
Also used: Expectations (org.jmock.Expectations), StatisticsFactory (org.apache.geode.StatisticsFactory), File (java.io.File), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
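
The test reads two fixture fields that the excerpt never declares: a jMock context and a testDirectory. A minimal sketch of that scaffolding, assuming conventional jMock/JUnit 4 setup (the field names match the snippet; the directory path is an assumption):

import java.io.File;
import org.jmock.Mockery;
import org.jmock.integration.junit4.JUnit4Mockery;
import org.junit.Before;

public class DiskInitFileFixtureSketch {
    // jMock context backing context.mock(...) and context.checking(...).
    private final Mockery context = new JUnit4Mockery();
    // Directory handed to DirectoryHolder so DiskInitFile has somewhere to write.
    private File testDirectory;

    @Before
    public void setUp() {
        testDirectory = new File("target", "testKrfIds");
        testDirectory.mkdirs();
    }
}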

Example 3 with StatisticsFactory

Use of org.apache.geode.StatisticsFactory in project geode by apache.

From class SimpleDiskRegionJUnitTest, method testGetChild:

// /*
// * Test method for
// * 'org.apache.geode.internal.cache.SimpleDiskRegion.basicInitializeOwner()'
// */
// @Test
// public void testBasicInitializeOwner()
// {
// deleteFiles();
// region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps);
// DiskRegion dr = ((LocalRegion)region).getDiskRegion();
// put100Int();
// assertEquals(new Integer(1), region.get(new Integer(1)));
// Oplog oplog = dr.getChild();
// int id = oplog.getOplogId();
// StatisticsFactory factory = dr.getOwner().getCache().getDistributedSystem();
// Oplog newOplog = new Oplog(id + 1, dr.getDiskStore(), new DirectoryHolder(factory,
// dirs[0], 1000000, 0));
// dr.setChild(newOplog);
// region.clear();
// newOplog = dr.getChild();
// assertEquals(null, region.get(new Integer(1)));
// try {
// dr.addToOplogSet(id, new File(oplog.getOplogFile()
// .getPath()), dr.getNextDir());
// }
// catch (Exception e) {
// logWriter
// .error(
// "Exception in synching data present in the buffers of RandomAccessFile of Oplog, to the disk",
// e);
// fail("Test failed because synching of data present in buffer of RandomAccesFile ");
// }
// oplog.close();
// dr.setIsRecovering(true);
// dr.basicInitializeOwner();
// assertEquals(new Integer(1), region.get(new Integer(1)));
// closeDown();
// }
/*
   * Test method for 'org.apache.geode.internal.cache.SimpleDiskRegion.getChild()'
   */
@Test
public void testGetChild() {
    deleteFiles();
    region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, diskProps);
    DiskRegion dr = ((LocalRegion) region).getDiskRegion();
    Oplog oplog = dr.testHook_getChild();
    long id = oplog.getOplogId();
    StatisticsFactory factory = region.getCache().getDistributedSystem();
    Oplog newOplog = new Oplog(id, dr.getOplogSet(), new DirectoryHolder(factory, dirs[0], 1000000, 0));
    dr.getDiskStore().persistentOplogs.setChild(newOplog);
    assertEquals(newOplog, dr.testHook_getChild());
    dr.setChild(oplog);
    assertEquals(oplog, dr.testHook_getChild());
    newOplog.close();
    newOplog = null;
    closeDown();
}
Also used: StatisticsFactory (org.apache.geode.StatisticsFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
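
As the test shows, the cache's DistributedSystem is the usual source of a StatisticsFactory for internal constructs such as DirectoryHolder. A minimal sketch of that pattern; the helper name and the 1 MB size limit are assumptions:

import java.io.File;
import org.apache.geode.StatisticsFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.internal.cache.DirectoryHolder;

public class DirectoryHolderSketch {
    public static DirectoryHolder holderFor(Cache cache, File dir) {
        // The cache's DistributedSystem implements StatisticsFactory, which
        // DirectoryHolder uses to create its per-directory disk statistics.
        StatisticsFactory factory = cache.getDistributedSystem();
        // Arguments mirror the test: directory, max space in bytes, dir index.
        return new DirectoryHolder(factory, dir, 1000000, 0);
    }
}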

Example 4 with StatisticsFactory

Use of org.apache.geode.StatisticsFactory in project geode by apache.

From class OplogRVVJUnitTest, method testRecoverRVV:

@Test
public void testRecoverRVV() throws UnknownHostException {
    final DiskInitFile df = context.mock(DiskInitFile.class);
    final LogWriterI18n logger = context.mock(LogWriterI18n.class);
    final GemFireCacheImpl cache = context.mock(GemFireCacheImpl.class);
    // Create a mock disk store impl.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    final DiskStoreID ownerId = DiskStoreID.random();
    final DiskStoreID m1 = DiskStoreID.random();
    final DiskStoreID m2 = DiskStoreID.random();
    final DiskRecoveryStore drs = context.mock(DiskRecoveryStore.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
            allowing(df).getOrCreateCanonicalId(m1);
            will(returnValue(1));
            allowing(df).getOrCreateCanonicalId(m2);
            will(returnValue(2));
            allowing(df).getOrCreateCanonicalId(ownerId);
            will(returnValue(3));
            allowing(df).getCanonicalObject(1);
            will(returnValue(m1));
            allowing(df).getCanonicalObject(2);
            will(returnValue(m2));
            allowing(df).getCanonicalObject(3);
            will(returnValue(ownerId));
            ignoring(df);
        }
    });
    DirectoryHolder dirHolder = new DirectoryHolder(sf, testDirectory, 0, 0);
    context.checking(new Expectations() {

        {
            ignoring(logger);
            allowing(cache).getLoggerI18n();
            will(returnValue(logger));
            allowing(cache).cacheTimeMillis();
            will(returnValue(System.currentTimeMillis()));
            allowing(parent).getCache();
            will(returnValue(cache));
            allowing(parent).getMaxOplogSizeInBytes();
            will(returnValue(10000L));
            allowing(parent).getName();
            will(returnValue("test"));
            allowing(parent).getStats();
            will(returnValue(new DiskStoreStats(sf, "stats")));
            allowing(parent).getDiskInitFile();
            will(returnValue(df));
            allowing(parent).getDiskStoreID();
            will(returnValue(DiskStoreID.random()));
        }
    });
    final DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
    rvv.recordVersion(m1, 0);
    rvv.recordVersion(m1, 1);
    rvv.recordVersion(m1, 2);
    rvv.recordVersion(m1, 10);
    rvv.recordVersion(m1, 7);
    rvv.recordVersion(m2, 0);
    rvv.recordVersion(m2, 1);
    rvv.recordVersion(m2, 2);
    rvv.recordGCVersion(m1, 1);
    rvv.recordGCVersion(m2, 0);
    // create the oplog
    final AbstractDiskRegion diskRegion = context.mock(AbstractDiskRegion.class);
    final PersistentOplogSet oplogSet = context.mock(PersistentOplogSet.class);
    final Map<Long, AbstractDiskRegion> map = new HashMap<Long, AbstractDiskRegion>();
    map.put(5L, diskRegion);
    context.checking(new Expectations() {

        {
            allowing(diskRegion).getRegionVersionVector();
            will(returnValue(rvv));
            allowing(diskRegion).getRVVTrusted();
            will(returnValue(true));
            allowing(parent).getAllDiskRegions();
            will(returnValue(map));
            allowing(oplogSet).getCurrentlyRecovering(5L);
            will(returnValue(drs));
            allowing(oplogSet).getParent();
            will(returnValue(parent));
            ignoring(oplogSet);
            ignoring(parent);
            allowing(diskRegion).getFlags();
            will(returnValue(EnumSet.of(DiskRegionFlag.IS_WITH_VERSIONING)));
        }
    });
    Map<Long, AbstractDiskRegion> regions = parent.getAllDiskRegions();
    Oplog oplog = new Oplog(1, oplogSet, dirHolder);
    oplog.close();
    context.checking(new Expectations() {

        {
            one(drs).recordRecoveredGCVersion(m1, 1);
            one(drs).recordRecoveredGCVersion(m2, 0);
            one(drs).recordRecoveredVersonHolder(ownerId, rvv.getMemberToVersion().get(ownerId), true);
            one(drs).recordRecoveredVersonHolder(m1, rvv.getMemberToVersion().get(m1), true);
            one(drs).recordRecoveredVersonHolder(m2, rvv.getMemberToVersion().get(m2), true);
            one(drs).setRVVTrusted(true);
        }
    });
    // Recreate the oplog in recovery mode and hand it the files written above.
    oplog = new Oplog(1, oplogSet);
    Collection<File> drfFiles = FileUtils.listFiles(testDirectory, new String[] { "drf" }, true);
    assertEquals(1, drfFiles.size());
    Collection<File> crfFiles = FileUtils.listFiles(testDirectory, new String[] { "crf" }, true);
    assertEquals(1, crfFiles.size());
    oplog.addRecoveredFile(drfFiles.iterator().next(), dirHolder);
    oplog.addRecoveredFile(crfFiles.iterator().next(), dirHolder);
    OplogEntryIdSet deletedIds = new OplogEntryIdSet();
    oplog.recoverDrf(deletedIds, false, true);
    oplog.recoverCrf(deletedIds, true, true, false, Collections.singleton(oplog), true);
    context.assertIsSatisfied();
}
Also used: Expectations (org.jmock.Expectations), DiskRegionVersionVector (org.apache.geode.internal.cache.versions.DiskRegionVersionVector), HashMap (java.util.HashMap), OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet), DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore), LogWriterI18n (org.apache.geode.i18n.LogWriterI18n), StatisticsFactory (org.apache.geode.StatisticsFactory), DiskStoreID (org.apache.geode.internal.cache.persistence.DiskStoreID), File (java.io.File), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
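
Before the oplog round-trip, the test builds the region version vector it expects to recover. A minimal sketch of that construction in isolation (member ids are random here, as in the test):

import org.apache.geode.internal.cache.persistence.DiskStoreID;
import org.apache.geode.internal.cache.versions.DiskRegionVersionVector;

public class RvvSketch {
    public static DiskRegionVersionVector buildRvv() {
        DiskStoreID ownerId = DiskStoreID.random();
        DiskStoreID member = DiskStoreID.random();
        DiskRegionVersionVector rvv = new DiskRegionVersionVector(ownerId);
        // Record versions seen from the member; jumping from 2 to 10 leaves a
        // gap the vector tracks until the missing versions arrive.
        rvv.recordVersion(member, 1);
        rvv.recordVersion(member, 2);
        rvv.recordVersion(member, 10);
        // The GC version marks how far tombstone collection has progressed.
        rvv.recordGCVersion(member, 1);
        return rvv;
    }
}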

Example 5 with StatisticsFactory

Use of org.apache.geode.StatisticsFactory in project geode by apache.

From class OffHeapStorageJUnitTest, method testCreateOffHeapStorage:

@Test
public void testCreateOffHeapStorage() {
    StatisticsFactory localStatsFactory = new LocalStatisticsFactory(null);
    OutOfOffHeapMemoryListener ooohml = mock(OutOfOffHeapMemoryListener.class);
    MemoryAllocator ma = OffHeapStorage.basicCreateOffHeapStorage(localStatsFactory, 1024 * 1024, ooohml);
    try {
        OffHeapMemoryStats stats = ma.getStats();
        assertNotNull(stats.getStats());
        assertEquals(1024 * 1024, stats.getFreeMemory());
        assertEquals(1024 * 1024, stats.getMaxMemory());
        assertEquals(0, stats.getUsedMemory());
        assertEquals(0, stats.getDefragmentations());
        assertEquals(0, stats.getDefragmentationsInProgress());
        assertEquals(0, stats.getDefragmentationTime());
        assertEquals(0, stats.getFragmentation());
        assertEquals(1, stats.getFragments());
        assertEquals(1024 * 1024, stats.getLargestFragment());
        assertEquals(0, stats.getObjects());
        assertEquals(0, stats.getReads());
        stats.incFreeMemory(100);
        assertEquals(1024 * 1024 + 100, stats.getFreeMemory());
        stats.incFreeMemory(-100);
        assertEquals(1024 * 1024, stats.getFreeMemory());
        stats.incMaxMemory(100);
        assertEquals(1024 * 1024 + 100, stats.getMaxMemory());
        stats.incMaxMemory(-100);
        assertEquals(1024 * 1024, stats.getMaxMemory());
        stats.incUsedMemory(100);
        assertEquals(100, stats.getUsedMemory());
        stats.incUsedMemory(-100);
        assertEquals(0, stats.getUsedMemory());
        stats.incObjects(100);
        assertEquals(100, stats.getObjects());
        stats.incObjects(-100);
        assertEquals(0, stats.getObjects());
        stats.incReads();
        assertEquals(1, stats.getReads());
        stats.setFragmentation(100);
        assertEquals(100, stats.getFragmentation());
        stats.setFragmentation(0);
        assertEquals(0, stats.getFragmentation());
        stats.setFragments(2);
        assertEquals(2, stats.getFragments());
        stats.setFragments(1);
        assertEquals(1, stats.getFragments());
        stats.setLargestFragment(100);
        assertEquals(100, stats.getLargestFragment());
        stats.setLargestFragment(1024 * 1024);
        assertEquals(1024 * 1024, stats.getLargestFragment());
        boolean originalEnableClockStats = DistributionStats.enableClockStats;
        // Enable clock stats so defragmentation timing is actually recorded.
        DistributionStats.enableClockStats = true;
        try {
            long start = stats.startDefragmentation();
            assertEquals(1, stats.getDefragmentationsInProgress());
            while (DistributionStats.getStatTime() == start) {
                Thread.yield();
            }
            stats.endDefragmentation(start);
            assertEquals(1, stats.getDefragmentations());
            assertEquals(0, stats.getDefragmentationsInProgress());
            assertTrue(stats.getDefragmentationTime() > 0);
        } finally {
            DistributionStats.enableClockStats = originalEnableClockStats;
        }
        stats.incObjects(100);
        stats.incUsedMemory(100);
        stats.setFragmentation(100);
        OffHeapStorage ohs = (OffHeapStorage) stats;
        // Re-initializing with a null delegate resets every statistic to zero.
        ohs.initialize(new NullOffHeapMemoryStats());
        assertEquals(0, stats.getFreeMemory());
        assertEquals(0, stats.getMaxMemory());
        assertEquals(0, stats.getUsedMemory());
        assertEquals(0, stats.getDefragmentations());
        assertEquals(0, stats.getDefragmentationsInProgress());
        assertEquals(0, stats.getDefragmentationTime());
        assertEquals(0, stats.getFragmentation());
        assertEquals(0, stats.getFragments());
        assertEquals(0, stats.getLargestFragment());
        assertEquals(0, stats.getObjects());
        assertEquals(0, stats.getReads());
        OutOfOffHeapMemoryException ex = null;
        try {
            ma.allocate(1024 * 1024 + 1);
            fail("expected OutOfOffHeapMemoryException");
        } catch (OutOfOffHeapMemoryException expected) {
            ex = expected;
        }
        verify(ooohml).outOfOffHeapMemory(ex);
        try {
            ma.allocate(1024 * 1024 + 1);
            fail("expected OutOfOffHeapMemoryException");
        } catch (OutOfOffHeapMemoryException expected) {
            ex = expected;
        }
        verify(ooohml).outOfOffHeapMemory(ex);
    } finally {
        System.setProperty(MemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY, "true");
        try {
            ma.close();
        } finally {
            System.clearProperty(MemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY);
        }
    }
}
Also used: OutOfOffHeapMemoryException (org.apache.geode.OutOfOffHeapMemoryException), LocalStatisticsFactory (org.apache.geode.internal.statistics.LocalStatisticsFactory), StatisticsFactory (org.apache.geode.StatisticsFactory), Test (org.junit.Test), UnitTest (org.apache.geode.test.junit.categories.UnitTest)
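
The allocator keeps its off-heap slab alive unless close() is told to free it. A minimal sketch of the create/close lifecycle on its own, reusing the calls from the test (the printed stat is only for illustration):

import static org.mockito.Mockito.mock;

import org.apache.geode.StatisticsFactory;
import org.apache.geode.internal.offheap.MemoryAllocator;
import org.apache.geode.internal.offheap.MemoryAllocatorImpl;
import org.apache.geode.internal.offheap.OffHeapStorage;
import org.apache.geode.internal.offheap.OutOfOffHeapMemoryListener;
import org.apache.geode.internal.statistics.LocalStatisticsFactory;

public class OffHeapLifecycleSketch {
    public static void main(String[] args) {
        StatisticsFactory sf = new LocalStatisticsFactory(null);
        OutOfOffHeapMemoryListener listener = mock(OutOfOffHeapMemoryListener.class);
        // Allocate a 1 MB slab, matching the test above.
        MemoryAllocator ma = OffHeapStorage.basicCreateOffHeapStorage(sf, 1024 * 1024, listener);
        try {
            System.out.println("free memory: " + ma.getStats().getFreeMemory());
        } finally {
            // Ask close() to actually release the slab, as the test's finally
            // block does via FREE_OFF_HEAP_MEMORY_PROPERTY.
            System.setProperty(MemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY, "true");
            try {
                ma.close();
            } finally {
                System.clearProperty(MemoryAllocatorImpl.FREE_OFF_HEAP_MEMORY_PROPERTY);
            }
        }
    }
}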

Aggregations

StatisticsFactory (org.apache.geode.StatisticsFactory): 15
Test (org.junit.Test): 8
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 6
File (java.io.File): 3
DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView): 3
Expectations (org.jmock.Expectations): 3
Statistics (org.apache.geode.Statistics): 2
StatisticsType (org.apache.geode.StatisticsType): 2
LocalStatisticsFactory (org.apache.geode.internal.statistics.LocalStatisticsFactory): 2
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 2
Before (org.junit.Before): 2
HashMap (java.util.HashMap): 1
OutOfOffHeapMemoryException (org.apache.geode.OutOfOffHeapMemoryException): 1
StatisticDescriptor (org.apache.geode.StatisticDescriptor): 1
Region (org.apache.geode.cache.Region): 1
CqQueryVsdStats (org.apache.geode.cache.query.internal.CqQueryVsdStats): 1
InternalDistributedSystem (org.apache.geode.distributed.internal.InternalDistributedSystem): 1
LogWriterI18n (org.apache.geode.i18n.LogWriterI18n): 1
BucketRegion (org.apache.geode.internal.cache.BucketRegion): 1
OplogEntryIdSet (org.apache.geode.internal.cache.DiskStoreImpl.OplogEntryIdSet): 1