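Every example on this page exercises org.apache.geode.cache.DiskStoreFactory, the public factory for creating named disk stores in Apache Geode; all snippets come from Geode's own test suite. As orientation, a minimal sketch of the pattern the examples share (assuming a running Cache named cache and a writable directory dir; the store name is arbitrary):

DiskStoreFactory dsf = cache.createDiskStoreFactory();
// Directory that will hold the oplog files (.crf/.drf/.krf) and the .if metadata file
dsf.setDiskDirs(new File[] { dir });
DiskStore store = dsf.create("myDiskStore");

Regions then opt in by name, for example via RegionFactory.setDiskStoreName("myDiskStore") combined with a persistent region shortcut or DataPolicy.PERSISTENT_REPLICATE, exactly as the tests below do.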

Example 56 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class PdxRenameJUnitTest, method testGetPdxTypes: persists a PdxValue through a PDX-persistent disk store, closes the cache, and reads the stored PDX type metadata back offline via DiskStoreImpl.getPdxTypes.

@Test
public void testGetPdxTypes() throws Exception {
    String DS_NAME = "PdxRenameJUnitTestDiskStore";
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    File f = new File(DS_NAME);
    f.mkdir();
    try {
        final Cache cache = (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
        try {
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            dsf.setDiskDirs(new File[] { f });
            dsf.create(DS_NAME);
            RegionFactory<String, PdxValue> rf1 = cache.createRegionFactory(RegionShortcut.LOCAL_PERSISTENT);
            rf1.setDiskStoreName(DS_NAME);
            Region<String, PdxValue> region1 = rf1.create("region1");
            region1.put("key1", new PdxValue(1));
            // Close the cache so the disk store can be read offline
            cache.close();
            Collection<PdxType> types = DiskStoreImpl.getPdxTypes(DS_NAME, new File[] { f });
            assertEquals(1, types.size());
            assertEquals(PdxValue.class.getName(), types.iterator().next().getClassName());
        } finally {
            if (!cache.isClosed()) {
                cache.close();
            }
        }
    } finally {
        FileUtils.deleteDirectory(f);
    }
}
Also used : PdxType(org.apache.geode.pdx.internal.PdxType) Properties(java.util.Properties) CacheFactory(org.apache.geode.cache.CacheFactory) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 57 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class BackupJUnitTest, method testCompactionDuringBackup: verifies that a backup taken while oplogs are being force-compacted restores all data that existed when the backup started and nothing written afterwards.

@Test
public void testCompactionDuringBackup() throws IOException, InterruptedException {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(diskDirs);
    dsf.setMaxOplogSize(1);
    dsf.setAutoCompact(false);
    dsf.setAllowForceCompaction(true);
    dsf.setCompactionThreshold(20);
    String name = "diskStore";
    DiskStoreImpl ds = (DiskStoreImpl) dsf.create(name);
    Region region = createRegion();
    // Put enough data to roll some oplogs
    for (int i = 0; i < 1024; i++) {
        region.put(i, getBytes(i));
    }
    RestoreScript script = new RestoreScript();
    ds.startBackup(backupDir, null, script);
    // Destroy most of the entries and force a compaction while the backup is in progress
    for (int i = 2; i < 1024; i++) {
        assertTrue(region.destroy(i) != null);
    }
    assertTrue(ds.forceCompaction());
    // Put another key to make sure we restore
    // from a backup that doesn't contain this key
    region.put("A", "A");
    ds.finishBackup(new BackupManager(cache.getInternalDistributedSystem().getDistributedMember(), cache));
    script.generate(backupDir);
    cache.close();
    destroyDiskDirs();
    restoreBackup(false);
    createCache();
    ds = createDiskStore();
    region = createRegion();
    validateEntriesExist(region, 0, 1024);
    assertNull(region.get("A"));
}
Also used : Region(org.apache.geode.cache.Region) RestoreScript(org.apache.geode.internal.cache.persistence.RestoreScript) BackupManager(org.apache.geode.internal.cache.persistence.BackupManager) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
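The backup hooks used above (startBackup, finishBackup, RestoreScript, BackupManager) live in Geode's internal packages, not the public API. A distilled sketch of the bracket the test builds, reusing the names from the code above:

RestoreScript script = new RestoreScript();
ds.startBackup(backupDir, null, script);   // snapshot point: later mutations are excluded from the backup
// ... entry destroys, forceCompaction(), and the extra put happen here ...
ds.finishBackup(new BackupManager(cache.getInternalDistributedSystem().getDistributedMember(), cache));
script.generate(backupDir);                // writes the restore script next to the backed-up files

The final assertions pin down exactly that window: the 1024 entries put before startBackup all survive the restore, while the put of "A" made after the backup started does not.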

Example 58 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithUpdates: checks oplog file sizes before and after offline compaction when two persisted entries have each been created and then updated.

@Test
public void testTwoEntriesWithUpdates() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithUpdates";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(2, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    {
        krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
        assertEquals(true, krfFile.exists());
    }
    // compare file sizes
    // After offline compaction, 2 create entries + 2 update without key entries
    // become 2 update with key entries. No more OPLOG_NEW_ENTRY_BASE_REC.
    // The RVV now contains a single member
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    updatesize1 += getStrSizeInOplog("key1");
    updatesize2 += getStrSizeInOplog("key2");
    assertEquals(crfsize + updatesize1 + updatesize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("update1", r.get("key1"));
    assertEquals("update2", r.get("key2"));
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
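This test and the two that follow drive offline compaction through internal DiskStoreImpl entry points; the sequence they share, distilled (internal API, mirroring the test code, not a public interface):

cache.close();                                            // the disk store must be offline
ds.disconnect();
DiskStoreImpl.validate(name, diskStore.getDiskDirs());    // sanity-check the oplogs on disk
DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
// dsi.getDeadRecordCount() / dsi.getLiveEntryCount() report what compaction dropped and kept

Compaction rolls the BACKUP<name>_1.* oplogs into BACKUP<name>_2.*, rewriting each surviving create-plus-update pair as a single update-with-key record, which is what the file-size arithmetic in these tests verifies.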

Example 59 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithRegionClear: extends the previous scenario with a remove and a region clear, so offline compaction keeps only the live entry from the second region.

@Test
public void testTwoEntriesWithRegionClear() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithRegionClear";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    r.clear();
    Region r2 = cache.createRegion("r2", af.create());
    // Put something live in the oplog to keep it alive.
    // This is needed because we now force a roll during ds close
    // so that a krf will be generated for the last oplog.
    r2.put("r2key1", "rwvalue1");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    // write extra RVV and RVVGC for clear operation
    int clearsize_in_crf = getRVVSize(1, new int[] { 1 }, false);
    // write extra RVV and RVVGC for clear operation
    int clearsize_in_drf = getRVVSize(1, new int[] { 1 }, true);
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    int createsize3 = getSize4Create(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1 + createsize3 + clearsize_in_crf, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true) + clearsize_in_drf, drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(5, dsi.getDeadRecordCount());
    assertEquals(1, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // offline compaction changed the only create-entry to be an update-with-key entry
    int updatesize3 = getSize4UpdateWithKey(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 1 }, false);
    assertEquals(crfsize + updatesize3, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(0, r.size());
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Example 60 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithRegionDestroy: like the clear case, but the region itself is locally destroyed before the cache closes and compaction runs.

@Test
public void testTwoEntriesWithRegionDestroy() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithRegionDestroy";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    // create a dummy region to keep diskstore files "alive" once we destroy the real region
    cache.createRegion("r2dummy", af.create());
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    r.clear();
    r.localDestroyRegion();
    Region r2 = cache.createRegion("r2", af.create());
    // Put something live in the oplog to keep it alive.
    // This is needed because we now force a roll during ds close
    // so that a krf will be generated for the last oplog.
    r2.put("r2key1", "rwvalue1");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    // write extra RVV and RVVGC for clear operation
    int clearsize_in_crf = getRVVSize(1, new int[] { 1 }, false);
    // write extra RVV and RVVGC for clear operation
    int clearsize_in_drf = getRVVSize(1, new int[] { 1 }, true);
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    int createsize3 = getSize4Create(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1 + createsize3 + clearsize_in_crf, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true) + clearsize_in_drf, drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(5, dsi.getDeadRecordCount());
    assertEquals(1, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // offline compaction changed the only create-entry to be an update-with-key entry
    int updatesize3 = getSize4UpdateWithKey(extra_byte_num_per_entry, "r2key1", "rwvalue1");
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 1, 1 }, false);
    assertEquals(crfsize + updatesize3, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(2, new int[] { 0, 0 }, true), drfFile.length());
    // Now we preallocate spaces for if files and also crfs and drfs. So the below check is not true
    // any more.
    // if (originalIfLength <= ifFile.length()) {
    // fail("expected " + ifFile.length() + " to be < " + originalIfLength);
    // }
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(0, r.size());
    // if test passed clean up files
    r.destroyRegion();
    diskStore.destroy();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)

Aggregations (usage counts across all examples)

DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 132
File (java.io.File): 95
DiskStore (org.apache.geode.cache.DiskStore): 91
Test (org.junit.Test): 86
AttributesFactory (org.apache.geode.cache.AttributesFactory): 56
Region (org.apache.geode.cache.Region): 46
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 46
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 35
Cache (org.apache.geode.cache.Cache): 32
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 24
RegionFactory (org.apache.geode.cache.RegionFactory): 22
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 21
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 19
Properties (java.util.Properties): 18
VM (org.apache.geode.test.dunit.VM): 18
LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics): 16
GatewaySenderFactory (org.apache.geode.cache.wan.GatewaySenderFactory): 12
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 11
IOException (java.io.IOException): 10
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 10