Example 71 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

The class BackupDUnitTest, method createOverflowRegion.

protected void createOverflowRegion(final VM vm) {
    SerializableRunnable createRegion = new SerializableRunnable("Create overflow region") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            dsf.setDiskDirs(getDiskDirs(getUniqueName()));
            dsf.setMaxOplogSize(1);
            DiskStore ds = dsf.create(getUniqueName());
            RegionFactory rf = new RegionFactory();
            rf.setDiskStoreName(ds.getName());
            rf.setDiskSynchronous(true);
            rf.setDataPolicy(DataPolicy.REPLICATE);
            rf.setEvictionAttributes(EvictionAttributes.createLIFOEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
            rf.create("region3");
        }
    };
    vm.invoke(createRegion);
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), RegionFactory (org.apache.geode.cache.RegionFactory), SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Cache (org.apache.geode.cache.Cache)
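
Outside the DUnit harness the same pattern reduces to a handful of public API calls. Below is a minimal standalone sketch, assuming a single local cache member; the class name OverflowRegionSketch, the store name overflowStore, and the diskDir directory are placeholders, not part of the test above.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;

public class OverflowRegionSketch {
    public static void main(String[] args) {
        // Standalone cache; mcast-port 0 keeps this member out of any cluster.
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        // Directory for the oplog files; "diskDir" is a placeholder.
        File dir = new File("diskDir");
        dir.mkdirs();

        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setDiskDirs(new File[] { dir });
        dsf.setMaxOplogSize(1); // 1 MB oplogs force frequent rolls, as in the test
        DiskStore ds = dsf.create("overflowStore");

        // Keep at most one entry in memory; evict the rest to disk (LIFO).
        Region<String, String> region = cache.<String, String>createRegionFactory()
            .setDataPolicy(DataPolicy.REPLICATE)
            .setDiskStoreName(ds.getName())
            .setDiskSynchronous(true)
            .setEvictionAttributes(
                EvictionAttributes.createLIFOEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK))
            .create("region3");

        region.put("k1", "v1");
        region.put("k2", "v2"); // one of the two values now lives on disk
        cache.close();
    }
}

With the entry limit set to 1, the second put evicts a value to the disk store rather than destroying it, which is the overflow behavior the backup tests exercise.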

Example 72 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

The class BackupJUnitTest, method backupAndRecover.

public void backupAndRecover(RegionCreator regionFactory) throws IOException, InterruptedException {
    Region region = regionFactory.createRegion();
    // Put enough data to roll some oplogs
    for (int i = 0; i < 1024; i++) {
        region.put(i, getBytes(i));
    }
    for (int i = 0; i < 512; i++) {
        region.destroy(i);
    }
    for (int i = 1024; i < 2048; i++) {
        region.put(i, getBytes(i));
    }
    // This section of the test is for bug 43951
    findDiskStore().forceRoll();
    // add a put to the current crf
    region.put("junk", "value");
    // do a destroy of a key in a previous oplog
    region.destroy(2047);
    // do a destroy of the key in the current crf
    region.destroy("junk");
    // The current crf is now all garbage, but the drf must be kept:
    // an older oplog still holds a create that this drf's destroy
    // record cancels.
    findDiskStore().forceRoll();
    // restore the deleted entry.
    region.put(2047, getBytes(2047));
    for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
        store.flush();
    }
    cache.close();
    createCache();
    region = regionFactory.createRegion();
    validateEntriesExist(region, 512, 2048);
    for (int i = 0; i < 512; i++) {
        assertNull(region.get(i));
    }
    BackupManager backup = cache.startBackup(cache.getInternalDistributedSystem().getDistributedMember());
    backup.prepareBackup();
    backup.finishBackup(backupDir, null, false);
    // Put another key to make sure we restore
    // from a backup that doesn't contain this key
    region.put("A", "A");
    cache.close();
    // Make sure the restore script refuses to run while the original disk files still exist.
    restoreBackup(true);
    // Make sure the disk store is unaffected by the failed restore
    createCache();
    region = regionFactory.createRegion();
    validateEntriesExist(region, 512, 2048);
    for (int i = 0; i < 512; i++) {
        assertNull(region.get(i));
    }
    assertEquals("A", region.get("A"));
    region.put("B", "B");
    cache.close();
    // destroy the disk directories
    destroyDiskDirs();
    // Now the restore script should work
    restoreBackup(false);
    // Make sure the cache has the restored backup
    createCache();
    region = regionFactory.createRegion();
    validateEntriesExist(region, 512, 2048);
    for (int i = 0; i < 512; i++) {
        assertNull(region.get(i));
    }
    assertNull(region.get("A"));
    assertNull(region.get("B"));
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), Region (org.apache.geode.cache.Region), BackupManager (org.apache.geode.internal.cache.persistence.BackupManager)
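
The flush-before-close step in the middle of this test is worth isolating: DiskStore.flush() blocks until queued writes reach the oplog files, so flushing every store guarantees that the files the backup copies reflect all acknowledged operations. A minimal sketch of just that step, assuming an InternalCache handle as the test has; the helper class name is hypothetical.

import org.apache.geode.cache.DiskStore;
import org.apache.geode.internal.cache.InternalCache;

// Hypothetical helper, not part of BackupJUnitTest; it isolates the
// flush-before-close idiom used above.
final class DiskStoreFlusher {

    static void flushAll(InternalCache cache) {
        // Includes the stores Geode creates internally to own region data.
        for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
            store.flush(); // blocks until buffered writes reach the oplog files
        }
    }
}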

Example 73 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithUpdates.

@Test
public void testTwoEntriesWithUpdates() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithUpdates";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(2, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    // compare file sizes
    // After offline compaction, 2 create entries + 2 update without key entries
    // become 2 update with key entries. No more OPLOG_NEW_ENTRY_BASE_REC.
    // The RVV now contains a single member
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    updatesize1 += getStrSizeInOplog("key1");
    updatesize2 += getStrSizeInOplog("key2");
    assertEquals(crfsize + updatesize1 + updatesize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("update1", r.get("key1"));
    assertEquals("update2", r.get("key2"));
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
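
The compaction entry point exercised here can also be driven standalone against a closed disk store; gfsh's compact offline-disk-store command exposes the same operation. A minimal sketch using the internal calls exactly as the test does, assuming the owning cache is already closed; the class name and directory array are placeholders.

import java.io.File;

import org.apache.geode.internal.cache.DiskStoreImpl;

public class OfflineCompactSketch {
    public static void main(String[] args) throws Exception {
        String name = "testTwoEntriesWithUpdates"; // disk store name
        File[] dirs = { new File(".") };           // placeholder oplog directories

        // Sanity-check the store first; throws if the files are inconsistent.
        DiskStoreImpl.validate(name, dirs);

        // upgrade=false keeps the on-disk format; maxOplogSize=-1 keeps the
        // store's configured size. Dead records (superseded creates/updates)
        // are dropped; live entries are rewritten into new oplogs.
        DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, dirs, false, -1);
        System.out.println("dead=" + dsi.getDeadRecordCount()
            + " live=" + dsi.getLiveEntryCount());
    }
}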

Example 74 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithRegionClear.

@Test
public void testTwoEntriesWithRegionClear() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithRegionClear";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    r.clear();
    Region r2 = cache.createRegion("r2", af.create());
    // Put something live in the oplog to keep it alive.
    // This is needed because we now force a roll during ds close
    // so that a krf will be generated for the last oplog.
    r2.put("r2key1", "rwvalue1");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    // The clear operation writes extra RVV and RVVGC records to both files.
    int clearsize_in_crf = getRVVSize(1, new int[] { 1 }, false);
    int clearsize_in_drf = getRVVSize(1, new int[] { 1 }, true);
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    int createsize3 = getSize4Create(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1 + createsize3 + clearsize_in_crf, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true) + clearsize_in_drf, drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(5, dsi.getDeadRecordCount());
    assertEquals(1, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // offline compaction changed the only create-entry to be an update-with-key entry
    int updatesize3 = getSize4UpdateWithKey(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 1 }, false);
    assertEquals(crfsize + updatesize3, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(0, r.size());
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
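
The oplog file-name arithmetic repeated throughout these tests (BACKUP<store>_<id>.crf and friends) is easy to mistype. A hypothetical helper, mirroring only the naming pattern visible above:

import java.io.File;

// Hypothetical helper mirroring the naming used in the tests above:
// BACKUP<storeName>_<oplogId>.crf / .drf / .krf, plus BACKUP<storeName>.if
final class OplogFiles {
    static File crf(File dir, String store, int oplogId) {
        return new File(dir, "BACKUP" + store + "_" + oplogId + ".crf");
    }
    static File drf(File dir, String store, int oplogId) {
        return new File(dir, "BACKUP" + store + "_" + oplogId + ".drf");
    }
    static File krf(File dir, String store, int oplogId) {
        return new File(dir, "BACKUP" + store + "_" + oplogId + ".krf");
    }
    static File ifFile(File dir, String store) {
        return new File(dir, "BACKUP" + store + ".if");
    }
}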

Example 75 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithRegionDestroy.

@Test
public void testTwoEntriesWithRegionDestroy() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithRegionDestroy";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    // create a dummy region to keep diskstore files "alive" once we destroy the real region
    cache.createRegion("r2dummy", af.create());
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    r.clear();
    r.localDestroyRegion();
    Region r2 = cache.createRegion("r2", af.create());
    // Put something live in the oplog to keep it alive.
    // This is needed because we now force a roll during ds close
    // so that a krf will be generated for the last oplog.
    r2.put("r2key1", "rwvalue1");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    // The clear operation writes extra RVV and RVVGC records to both files.
    int clearsize_in_crf = getRVVSize(1, new int[] { 1 }, false);
    int clearsize_in_drf = getRVVSize(1, new int[] { 1 }, true);
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    int createsize3 = getSize4Create(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1 + createsize3 + clearsize_in_crf, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true) + clearsize_in_drf, drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(5, dsi.getDeadRecordCount());
    assertEquals(1, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // offline compaction changed the only create-entry to be an update-with-key entry
    int updatesize3 = getSize4UpdateWithKey(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 1 }, false);
    assertEquals(crfsize + updatesize3, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 0, 0 }, true), drfFile.length());
    // We now preallocate space for if files as well as crfs and drfs, so the
    // check below no longer holds.
    // if (originalIfLength <= ifFile.length()) {
    // fail("expected " + ifFile.length() + " to be < " + originalIfLength);
    // }
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(0, r.size());
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
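
Note the teardown order used by every test in this class: the region is destroyed before the disk store, since a disk store can only be destroyed once no region references it. A minimal sketch of that cleanup, with a hypothetical helper class name:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.Region;

// Hypothetical teardown helper following the order used in the tests above.
final class PersistentRegionCleanup {
    static void destroy(Cache cache, String regionName, String storeName) {
        Region<?, ?> region = cache.getRegion(regionName);
        if (region != null) {
            region.destroyRegion(); // must happen before the store is destroyed
        }
        DiskStore store = cache.findDiskStore(storeName);
        if (store != null) {
            store.destroy(); // removes the oplog files; fails if still in use
        }
    }
}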

Aggregations

DiskStore (org.apache.geode.cache.DiskStore): 190
Test (org.junit.Test): 120
AttributesFactory (org.apache.geode.cache.AttributesFactory): 91
DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 91
File (java.io.File): 79
Region (org.apache.geode.cache.Region): 71
Cache (org.apache.geode.cache.Cache): 61
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 54
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 46
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 44
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 39
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 32
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 31
VM (org.apache.geode.test.dunit.VM): 28
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 24
Host (org.apache.geode.test.dunit.Host): 23
Expectations (org.jmock.Expectations): 23
InternalCache (org.apache.geode.internal.cache.InternalCache): 21
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 21
IOException (java.io.IOException): 20