
Example 61 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

From the class DiskOfflineCompactionJUnitTest, method testForceRollTwoEntriesWithUpdates.

@Test
public void testForceRollTwoEntriesWithUpdates() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testForceRollTwoEntriesWithUpdates";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    // extra key to keep oplog1 from being empty
    r.put("key0", "value0");
    r.put("key1", "value1");
    r.put("key2", "value2");
    diskStore.forceRoll();
    r.put("key1", "update1");
    r.put("key2", "update2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
    int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");
    assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(2, dsi.getDeadRecordCount());
    assertEquals(3, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    // oplog2 contains two updates so it remains unchanged
    assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
    File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
    File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
    File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
    assertEquals(true, krf3File.exists());
    assertEquals(true, crf3File.exists());
    assertEquals(true, drf3File.exists());
    // after offline compaction the rvv is reset, and _3.crf holds a single update-with-key record, i.e. key0
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
    assertEquals(crfsize + updatesize0, crf3File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf3File.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(3, r.size());
    assertEquals("value0", r.get("key0"));
    assertEquals("update1", r.get("key1"));
    assertEquals("update2", r.get("key2"));
    // test passed; clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
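
Stripped of the size arithmetic, the test above exercises a simple pattern: create a named DiskStore, attach it to a persistent region, and call forceRoll() so later writes open a new oplog, turning the earlier records into compaction candidates. A minimal hedged sketch, assuming a running Cache and the imports from the "Also used" list; the store name and directory are illustrative, not from the test:

static void forceRollExample(Cache cache) {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false); // keep compaction manual so oplog contents stay predictable
    dsf.setDiskDirs(new File[] { new File("exampleDiskDir") }); // hypothetical directory
    DiskStore store = dsf.create("exampleStore"); // hypothetical store name
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(store.getName());
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    r.put("key1", "value1"); // create record written to BACKUPexampleStore_1.crf
    store.forceRoll(); // close oplog 1; the next write opens _2.crf and _2.drf
    r.put("key1", "update1"); // lands in oplog 2, leaving a dead record behind in oplog 1
}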

Example 62 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

From the class DiskOfflineCompactionJUnitTest, method testbug41862.

@Test
public void testbug41862() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testbug41862";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.create("key1", "value1");
    // to keep this oplog from going empty
    r.create("key2", "value2");
    ((LocalRegion) r).getDiskStore().forceRoll();
    r.create("key3", "value3");
    r.remove("key1");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int createsize3 = getSize4Create(extra_byte_num_per_entry, "key3", "value3");
    // 1 tombstone with key
    int tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key1");
    assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    // adjust rvv size
    crfsize += (getRVVSize(1, new int[] { 1 }, false) - getRVVSize(0, null, false));
    assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(1, dsi.getDeadRecordCount());
    assertEquals(3, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    // offline compaction should not have changed _2.crf or _2.drf
    assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
    // offline compaction resets the rvv, and the create-entry becomes one update-with-key entry
    // in _3.crf; since there are no creates, there is no OPLOG_NEW_ENTRY_BASE_REC_SIZE
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key3", "value3");
    File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
    File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
    File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
    assertEquals(true, krf3File.exists());
    assertEquals(true, crf3File.exists());
    assertEquals(true, drf3File.exists());
    assertEquals(crfsize + updatesize1, crf3File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf3File.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("value2", r.get("key2"));
    assertEquals("value3", r.get("key3"));
    // test passed; clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
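
Both offline-compaction tests follow the same post-mortem sequence once the cache is closed: validate the oplogs, compact them in place, and inspect how many records were reclaimed. A hedged sketch using only the static DiskStoreImpl entry points that appear in the tests; the meanings of the final two arguments are assumptions read from context (no version upgrade, default max oplog size), so check the javadoc before relying on them:

static void compactOffline(String name, File[] diskDirs) {
    // sanity-check the oplog files before rewriting anything
    DiskStoreImpl.validate(name, diskDirs);
    // compact in place, mirroring the arguments used by the tests above
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskDirs, false, -1);
    System.out.println("dead records removed: " + dsi.getDeadRecordCount());
    System.out.println("live entries kept: " + dsi.getLiveEntryCount());
}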

Example 63 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

From the class DeltaPropagationDUnitTest, method createClientCache.

public static void createClientCache(int[] ports, String rLevel, String conflate, Integer listener, EvictionAttributes evictAttrs, ExpirationAttributes expAttrs) throws Exception {
    CacheServerTestUtil.disableShufflingOfEndpoints();
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    props.setProperty(CONFLATE_EVENTS, conflate);
    new DeltaPropagationDUnitTest().createCache(props);
    AttributesFactory factory = new AttributesFactory();
    pool = ClientServerTestCase.configureConnectionPool(factory, "localhost", ports, true, Integer.parseInt(rLevel), 2, null, 1000, 250, false, -2);
    factory.setScope(Scope.LOCAL);
    if (listener.intValue() != 0) {
        factory.addCacheListener(getCacheListener(listener.intValue()));
    }
    if (evictAttrs != null) {
        factory.setEvictionAttributes(evictAttrs);
    }
    if (expAttrs != null) {
        factory.setEntryTimeToLive(expAttrs);
    }
    if (evictAttrs != null && evictAttrs.getAction().isOverflowToDisk()) {
        // create a disk store with an overflow dir; since this is overflow-only data there is
        // nothing to recover, so a random port number can serve as the dir name
        int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
        File dir = new File("overflow_" + port);
        if (!dir.exists()) {
            dir.mkdir();
        }
        File[] dir1 = new File[] { dir };
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        factory.setDiskStoreName(dsf.setDiskDirs(dir1).create("client_overflow_ds").getName());
    }
    factory.setConcurrencyChecksEnabled(false);
    RegionAttributes attrs = factory.create();
    Region region = cache.createRegion(regionName, attrs);
    logger = cache.getLogger();
}
Also used: AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) Region(org.apache.geode.cache.Region) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory)
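
The overflow branch above only needs scratch space, not recovery, which is why a throwaway directory name is good enough. A hedged standalone sketch of the same wiring; EvictionAttributes.createLRUEntryAttributes, the 100-entry threshold, and the literal names are assumptions, not taken from the test:

static void createOverflowRegion(Cache cache) {
    File dir = new File("overflow_example"); // hypothetical scratch directory
    if (!dir.exists()) {
        dir.mkdir();
    }
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    String storeName = dsf.setDiskDirs(new File[] { dir }).create("client_overflow_ds").getName();
    AttributesFactory factory = new AttributesFactory();
    factory.setDiskStoreName(storeName);
    // overflow to disk once the region holds more than 100 entries (assumed threshold)
    factory.setEvictionAttributes(
        EvictionAttributes.createLRUEntryAttributes(100, EvictionAction.OVERFLOW_TO_DISK));
    cache.createRegion("overflowRegion", factory.create());
}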

Example 64 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

From the class PersistentRecoveryOrderDUnitTest, method createPersistentRegionAsync.

protected AsyncInvocation createPersistentRegionAsync(final VM vm, final boolean diskSynchronous) {
    SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            File dir = getDiskDirForVM(vm);
            dir.mkdirs();
            dsf.setDiskDirs(new File[] { dir });
            dsf.setMaxOplogSize(1);
            DiskStore ds = dsf.create(REGION_NAME);
            RegionFactory rf = new RegionFactory();
            rf.setDiskStoreName(ds.getName());
            rf.setDiskSynchronous(diskSynchronous);
            rf.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
            rf.setScope(Scope.DISTRIBUTED_ACK);
            rf.create(REGION_NAME);
        }
    };
    return vm.invokeAsync(createRegion);
}
Also used: DiskStore(org.apache.geode.cache.DiskStore) RegionFactory(org.apache.geode.cache.RegionFactory) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Cache(org.apache.geode.cache.Cache)
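
The helper above uses the deprecated no-arg RegionFactory constructor. A hedged alternative sketch built on the cache's own factory; RegionShortcut.REPLICATE_PERSISTENT is assumed to correspond to the DataPolicy.PERSISTENT_REPLICATE / Scope.DISTRIBUTED_ACK pair the helper sets explicitly:

static void createPersistentRegion(Cache cache, File dir, String name, boolean diskSynchronous) {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dir.mkdirs();
    dsf.setDiskDirs(new File[] { dir });
    dsf.setMaxOplogSize(1); // 1 MB oplogs roll frequently, which recovery tests rely on
    DiskStore ds = dsf.create(name);
    cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
        .setDiskStoreName(ds.getName())
        .setDiskSynchronous(diskSynchronous)
        .create(name);
}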

Example 65 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

From the class PersistentRVVRecoveryDUnitTest, method testUpdateRVVWithAsyncPersistence.

/**
   * Test that with concurrent updates to an async disk region, we correctly update the RVV on disk.
   */
@Test
public void testUpdateRVVWithAsyncPersistence() throws Throwable {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(1);
    SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            File dir = getDiskDirForVM(vm0);
            dir.mkdirs();
            dsf.setDiskDirs(new File[] { dir });
            dsf.setMaxOplogSize(1);
            dsf.setQueueSize(100);
            dsf.setTimeInterval(1000);
            DiskStore ds = dsf.create(REGION_NAME);
            RegionFactory rf = new RegionFactory();
            rf.setDiskStoreName(ds.getName());
            rf.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
            rf.setScope(Scope.DISTRIBUTED_ACK);
            rf.setDiskSynchronous(false);
            rf.create(REGION_NAME);
        }
    };
    // Create a region with async persistence
    vm0.invoke(createRegion);
    // In two different threads, perform updates to the same key on the same region
    AsyncInvocation ins0 = vm0.invokeAsync(new SerializableRunnable("change the entry at vm0") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion(REGION_NAME);
            for (int i = 0; i < 500; i++) {
                region.put("A", "vm0-" + i);
            }
        }
    });
    AsyncInvocation ins1 = vm0.invokeAsync(new SerializableRunnable("change the entry at vm0, second thread") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion(REGION_NAME);
            for (int i = 0; i < 500; i++) {
                region.put("A", "vm1-" + i);
            }
        }
    });
    // Wait for the update threads to finish.
    ins0.getResult(MAX_WAIT);
    ins1.getResult(MAX_WAIT);
    // Make sure the async queue is flushed to disk
    vm0.invoke(new SerializableRunnable() {

        @Override
        public void run() {
            Cache cache = getCache();
            DiskStore ds = cache.findDiskStore(REGION_NAME);
            ds.flush();
        }
    });
    // Assert that the disk has seen all of the updates
    RegionVersionVector rvv = getRVV(vm0);
    RegionVersionVector diskRVV = getDiskRVV(vm0);
    assertSameRVV(rvv, diskRVV);
    // Bounce the cache and make the same assertion
    closeCache(vm0);
    vm0.invoke(createRegion);
    // Assert that the recovered RVV is the same as before the restart
    RegionVersionVector rvv2 = getRVV(vm0);
    assertSameRVV(rvv, rvv2);
    // The disk RVV should also match.
    RegionVersionVector diskRVV2 = getDiskRVV(vm0);
    assertSameRVV(rvv2, diskRVV2);
}
Also used: DiskStore(org.apache.geode.cache.DiskStore) RegionFactory(org.apache.geode.cache.RegionFactory) VM(org.apache.geode.test.dunit.VM) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) CacheSerializableRunnable(org.apache.geode.cache30.CacheSerializableRunnable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) DiskRegion(org.apache.geode.internal.cache.DiskRegion) Region(org.apache.geode.cache.Region) Host(org.apache.geode.test.dunit.Host) RegionVersionVector(org.apache.geode.internal.cache.versions.RegionVersionVector) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Cache(org.apache.geode.cache.Cache) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
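
Because the region is configured with setDiskSynchronous(false), writes sit in the disk store's async queue until the queue size or time interval triggers a write, which is why the test calls ds.flush() before comparing RVVs. A minimal hedged sketch of the async configuration used above, with the knob values copied from the test and the store and region names illustrative:

static void createAsyncPersistentRegion(Cache cache, File dir) {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { dir });
    dsf.setQueueSize(100); // buffer up to 100 operations before writing to disk
    dsf.setTimeInterval(1000); // flush pending operations at least every 1000 ms
    DiskStore ds = dsf.create("asyncStore"); // hypothetical store name
    RegionFactory rf = new RegionFactory();
    rf.setDiskStoreName(ds.getName());
    rf.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    rf.setScope(Scope.DISTRIBUTED_ACK);
    rf.setDiskSynchronous(false); // route writes through the async queue
    rf.create("asyncRegion"); // hypothetical region name
    // ... puts happen here ...
    ds.flush(); // force queued operations to disk before inspecting files or the RVV
}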

Aggregations

DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 132
File (java.io.File): 95
DiskStore (org.apache.geode.cache.DiskStore): 91
Test (org.junit.Test): 86
AttributesFactory (org.apache.geode.cache.AttributesFactory): 56
Region (org.apache.geode.cache.Region): 46
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 46
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 35
Cache (org.apache.geode.cache.Cache): 32
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 24
RegionFactory (org.apache.geode.cache.RegionFactory): 22
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 21
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 19
Properties (java.util.Properties): 18
VM (org.apache.geode.test.dunit.VM): 18
LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics): 16
GatewaySenderFactory (org.apache.geode.cache.wan.GatewaySenderFactory): 12
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 11
IOException (java.io.IOException): 10
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 10