Example 66 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class CacheServerImpl, method getAttribFactoryForClientMessagesRegion.

public static AttributesFactory getAttribFactoryForClientMessagesRegion(InternalCache cache, String ePolicy, int capacity, String overflowDir, boolean isDiskStore) throws InvalidValueException, GemFireIOException {
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    if (isDiskStore) {
        // the overflowDir parameter actually carries the disk store name
        factory.setDiskStoreName(overflowDir);
        // client subscription queues always overflow to disk
        // see feature request #41479
        factory.setDiskSynchronous(true);
    } else if (overflowDir == null || overflowDir.equals(ClientSubscriptionConfig.DEFAULT_OVERFLOW_DIRECTORY)) {
        factory.setDiskStoreName(null);
        // client subscription queues always overflow to disk
        // see feature request #41479
        factory.setDiskSynchronous(true);
    } else {
        File dir = new File(overflowDir + File.separatorChar + generateNameForClientMsgsRegion(OSProcess.getId()));
        // This will delete the overflow directory when the virtual machine terminates.
        dir.deleteOnExit();
        if (!dir.mkdirs() && !dir.isDirectory()) {
            throw new GemFireIOException("Could not create client subscription overflow directory: " + dir.getAbsolutePath());
        }
        File[] dirs = { dir };
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] { MAX_VALUE }).create("bsi");
        factory.setDiskStoreName("bsi");
        // kept synchronous for backward compatibility
        factory.setDiskSynchronous(true);
    }
    factory.setDataPolicy(DataPolicy.NORMAL);
    // enable statistics
    factory.setStatisticsEnabled(true);
    /* setting LIFO related eviction attributes */
    if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
        factory.setEvictionAttributes(EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
    } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
        // memory-based LIFO eviction
        factory.setEvictionAttributes(EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
    } else {
        // throw invalid eviction policy exception
        throw new InvalidValueException(LocalizedStrings.CacheServerImpl__0_INVALID_EVICTION_POLICY.toLocalizedString(ePolicy));
    }
    return factory;
}
Also used: InvalidValueException (org.apache.geode.InvalidValueException), AttributesFactory (org.apache.geode.cache.AttributesFactory), GemFireIOException (org.apache.geode.GemFireIOException), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory)
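
This example packs a fair amount into one branch: the disk store is created under a fixed name ("bsi") and the region is then linked to it purely by that name via setDiskStoreName. A minimal standalone sketch of the same wiring, with hypothetical store and directory names and an existing Cache assumed (same imports as the example above, with Integer.MAX_VALUE spelled out):

    // Sketch only: hypothetical names; assumes an existing Cache "cache".
    File dir = new File("overflow-dir");
    dir.mkdirs();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(true);
    // one directory, effectively unbounded size
    dsf.setDiskDirsAndSizes(new File[] { dir }, new int[] { Integer.MAX_VALUE });
    dsf.create("overflowStore");
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.LOCAL);
    factory.setDiskStoreName("overflowStore"); // link the region to the store by name
    factory.setDiskSynchronous(true);
    factory.setDataPolicy(DataPolicy.NORMAL);
    factory.setEvictionAttributes(
        EvictionAttributes.createLIFOEntryAttributes(1000, EvictionAction.OVERFLOW_TO_DISK));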

Example 67 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testForceRollTwoEntriesWithUpdateAndDestroy.

@Test
public void testForceRollTwoEntriesWithUpdateAndDestroy() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testForceRollTwoEntriesWithUpdateAndDestroy";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    File krf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    // extra key to keep oplog1 from being empty
    r.put("key0", "value0");
    r.put("key1", "value1");
    r.put("key2", "value2");
    diskStore.forceRoll();
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
    int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    assertEquals(crfsize + updatesize1 + updatesize2 + tombstonesize1, crf2File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(3, dsi.getDeadRecordCount());
    assertEquals(3, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    assertEquals(false, crf2File.exists());
    assertEquals(false, drf2File.exists());
    assertEquals(false, krf2File.exists());
    File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
    File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
    File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
    assertEquals(true, crf3File.exists());
    assertEquals(true, drf3File.exists());
    assertEquals(true, krf3File.exists());
    // after offline compaction, rvv is reset, and only 3 update-with-key,
    // i.e. key0, key1, key2(tombstone) in _3.crf
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
    tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key2");
    assertEquals(crfsize + updatesize0 + updatesize1 + tombstonesize1, crf3File.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf3File.length());
    // Space is now preallocated for the if file as well as the crfs and drfs,
    // so the check below no longer holds.
    // if (originalIfLength <= ifFile.length()) {
    // fail("expected " + ifFile.length() + " to be < " + originalIfLength);
    // }
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("value0", r.get("key0"));
    assertEquals("update1", r.get("key1"));
    // test passed; clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
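
For readers decoding the file names computed above: each oplog n of a disk store NAME is split across BACKUPNAME_n.crf (create/update records), BACKUPNAME_n.drf (delete records) and BACKUPNAME_n.krf (key records), and every store has a single BACKUPNAME.if metadata file. forceRoll() closes the current oplog and starts the next, which is why the test sees both _1 and _2 files before compaction. A hedged sketch of building those paths for an arbitrary store ("myStore" is a hypothetical name):

    // Sketch: locate the oplog files of a disk store.
    DiskStore store = cache.findDiskStore("myStore");
    File dir = store.getDiskDirs()[0];
    int oplogId = 1; // oplog ids start at 1 and grow with each roll
    String prefix = "BACKUP" + store.getName();
    File crf = new File(dir, prefix + "_" + oplogId + ".crf"); // creates/updates
    File drf = new File(dir, prefix + "_" + oplogId + ".drf"); // destroys
    File krf = new File(dir, prefix + "_" + oplogId + ".krf"); // keys
    File ifFile = new File(dir, prefix + ".if"); // disk store metadata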

Example 68 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesNoCompact.

@Test
public void testTwoEntriesNoCompact() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesNoCompact";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    cache.close();
    ds.disconnect();
    System.out.println("empty rvv size = " + getRVVSize(0, null, false));
    System.out.println("empty rvvgc size = " + getRVVSize(1, new int[] { 0 }, true));
    System.out.println("1 member rvv size = " + getRVVSize(1, new int[] { 1 }, false));
    System.out.println("2 member rvv size = " + getRVVSize(2, new int[] { 1, 1 }, false));
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(0, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, new int[] { 0 }, true), drfFile.length());
    // offline compaction should not have created a new oplog.
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("value1", r.get("key1"));
    assertEquals("value2", r.get("key2"));
    // test passed; clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
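
Note that every test here calls dsf.setAutoCompact(false) so the oplog contents stay deterministic and compaction can be driven explicitly while the store is offline. Outside of tests, letting the running cache compact for you is usually preferable; a hedged sketch of that configuration (store name hypothetical, threshold semantics per the DiskStoreFactory API):

    // Sketch: online auto-compaction instead of offline compaction.
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(true);       // compact oplogs while the cache is running
    dsf.setCompactionThreshold(50); // an oplog becomes compactable once its live data drops below 50%
    DiskStore store = dsf.create("autoCompactedStore");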

Example 69 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithUpdateAndDestroy.

@Test
public void testTwoEntriesWithUpdateAndDestroy() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithUpdateAndDestroy";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(3, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // compare file sizes
    // After offline compaction, only one update with key and one tombstone with key are left
    // No more OPLOG_NEW_ENTRY_BASE_REC.
    // The crf now contains an RVV with one entry
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    updatesize1 += getStrSizeInOplog("key1");
    tombstonesize1 += getStrSizeInOplog("key2");
    assertEquals(crfsize + updatesize1 + tombstonesize1, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(1, r.size());
    assertEquals("update1", r.get("key1"));
    // test passed; clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used: DiskStore (org.apache.geode.cache.DiskStore), AttributesFactory (org.apache.geode.cache.AttributesFactory), Region (org.apache.geode.cache.Region), File (java.io.File), DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
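
The size arithmetic in this test hinges on one effect of offline compaction: surviving records are rewritten as updates-with-key and tombstones-with-key (hence the getStrSizeInOplog additions for "key1" and "key2"), and the RVV carries over into the new oplog. Stripped of the assertions, the offline pass itself reduces to the following sketch; the name and directories are hypothetical, the flag and limit mirror the test's arguments, and the store must be offline (cache closed, distributed system disconnected):

    // Sketch: validate, then offline-compact, a closed disk store.
    String name = "myStore";                 // hypothetical store name
    File[] dirs = { new File("store-dir") }; // hypothetical disk directories
    DiskStoreImpl.validate(name, dirs);      // consistency check first
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, dirs, false, -1);
    System.out.println("dead records: " + dsi.getDeadRecordCount()
        + ", live entries: " + dsi.getLiveEntryCount());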

Example 70 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class DiskRegionIllegalArguementsJUnitTest, method testTimeInterval.

@Test
public void testTimeInterval() {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    try {
        dsf.setTimeInterval(-1);
        fail("expected IllegalArgumentException");
    } catch (IllegalArgumentException e) {
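        // expected: the factory rejects a negative time interval eagerly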
    }
    dsf.setTimeInterval(1);
    assertEquals(1, dsf.create("test").getTimeInterval());
}
Also used: DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
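
The same eager-validation pattern applies to the other DiskStoreFactory setters. For instance, setCompactionThreshold only accepts values in the 0 to 100 range, so an analogous probe (a sketch, not taken from the test class) would look like:

    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    try {
        dsf.setCompactionThreshold(101); // out of range: valid values are 0..100
        fail("expected IllegalArgumentException");
    } catch (IllegalArgumentException expected) {
        // rejected before create(), just like setTimeInterval(-1) above
    }
    dsf.setCompactionThreshold(50);
    assertEquals(50, dsf.create("test2").getCompactionThreshold());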

Aggregations

DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 132 usages
File (java.io.File): 95 usages
DiskStore (org.apache.geode.cache.DiskStore): 91 usages
Test (org.junit.Test): 86 usages
AttributesFactory (org.apache.geode.cache.AttributesFactory): 56 usages
Region (org.apache.geode.cache.Region): 46 usages
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 46 usages
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 35 usages
Cache (org.apache.geode.cache.Cache): 32 usages
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 24 usages
RegionFactory (org.apache.geode.cache.RegionFactory): 22 usages
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 21 usages
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 19 usages
Properties (java.util.Properties): 18 usages
VM (org.apache.geode.test.dunit.VM): 18 usages
LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics): 16 usages
GatewaySenderFactory (org.apache.geode.cache.wan.GatewaySenderFactory): 12 usages
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 11 usages
IOException (java.io.IOException): 10 usages
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 10 usages