use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class CacheServerImpl method getAttribFactoryForClientMessagesRegion.
public static AttributesFactory getAttribFactoryForClientMessagesRegion(InternalCache cache,
    String ePolicy, int capacity, String overflowDir, boolean isDiskStore)
    throws InvalidValueException, GemFireIOException {
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  if (isDiskStore) {
    // in this case the overflowDir parameter is actually a disk store name
    factory.setDiskStoreName(overflowDir);
    // the client subscription queue always overflows to disk; writes are synchronous
    // see feature request #41479
    factory.setDiskSynchronous(true);
  } else if (overflowDir == null
      || overflowDir.equals(ClientSubscriptionConfig.DEFAULT_OVERFLOW_DIRECTORY)) {
    factory.setDiskStoreName(null);
    // the client subscription queue always overflows to disk; writes are synchronous
    // see feature request #41479
    factory.setDiskSynchronous(true);
  } else {
    File dir = new File(overflowDir + File.separatorChar
        + generateNameForClientMsgsRegion(OSProcess.getId()));
    // delete the overflow directory when the virtual machine terminates
    dir.deleteOnExit();
    if (!dir.mkdirs() && !dir.isDirectory()) {
      throw new GemFireIOException(
          "Could not create client subscription overflow directory: " + dir.getAbsolutePath());
    }
    File[] dirs = {dir};
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(true).setDiskDirsAndSizes(dirs, new int[] {MAX_VALUE}).create("bsi");
    factory.setDiskStoreName("bsi");
    // kept synchronous for backward compatibility
    factory.setDiskSynchronous(true);
  }
  factory.setDataPolicy(DataPolicy.NORMAL);
  // enable statistics so eviction can track entry counts and memory usage
  factory.setStatisticsEnabled(true);
  // set LIFO eviction attributes matching the requested policy
  if (HARegionQueue.HA_EVICTION_POLICY_ENTRY.equals(ePolicy)) {
    factory.setEvictionAttributes(
        EvictionAttributes.createLIFOEntryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
  } else if (HARegionQueue.HA_EVICTION_POLICY_MEMORY.equals(ePolicy)) {
    factory.setEvictionAttributes(
        EvictionAttributes.createLIFOMemoryAttributes(capacity, EvictionAction.OVERFLOW_TO_DISK));
  } else {
    throw new InvalidValueException(
        LocalizedStrings.CacheServerImpl__0_INVALID_EVICTION_POLICY.toLocalizedString(ePolicy));
  }
  return factory;
}
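Note that the chained dsf call above both configures and creates the disk store; only its name ("bsi") is used afterwards. For orientation, a minimal sketch of the same pattern written out step by step, with a hypothetical store name and directory (neither appears in the Geode source):

// Sketch: configure and create a disk store, then point region attributes at it by name.
// "exampleOverflowStore" and "/tmp/overflow" are hypothetical placeholders.
DiskStoreFactory dsf = cache.createDiskStoreFactory();
dsf.setAutoCompact(true);
dsf.setDiskDirsAndSizes(new File[] {new File("/tmp/overflow")}, new int[] {Integer.MAX_VALUE});
DiskStore store = dsf.create("exampleOverflowStore");
AttributesFactory factory = new AttributesFactory();
factory.setDiskStoreName(store.getName()); // regions reference disk stores by name
factory.setDiskSynchronous(true);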
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskOfflineCompactionJUnitTest method testForceRollTwoEntriesWithUpdateAndDestroy.
@Test
public void testForceRollTwoEntriesWithUpdateAndDestroy() throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testForceRollTwoEntriesWithUpdateAndDestroy";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
  File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
  File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
  File krf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry =
      InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
  // extra key to keep oplog1 from being empty
  r.put("key0", "value0");
  r.put("key1", "value1");
  r.put("key2", "value2");
  diskStore.forceRoll();
  r.put("key1", "update1");
  r.put("key2", "update2");
  r.remove("key2");
  cache.close();
  ds.disconnect();
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());

  int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
  int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false)
      + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
  int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
  int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");
  int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
  assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true),
      drfFile.length());
  crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] {1}, false);
  assertEquals(crfsize + updatesize1 + updatesize2 + tombstonesize1, crf2File.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {0}, true), drf2File.length());
  long originalIfLength = ifFile.length();

  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(3, dsi.getDeadRecordCount());
  assertEquals(3, dsi.getLiveEntryCount());
  assertEquals(false, crfFile.exists());
  assertEquals(false, drfFile.exists());
  assertEquals(false, krfFile.exists());
  assertEquals(false, crf2File.exists());
  assertEquals(false, drf2File.exists());
  assertEquals(false, krf2File.exists());
  File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
  File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
  File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
  assertEquals(true, crf3File.exists());
  assertEquals(true, drf3File.exists());
  assertEquals(true, krf3File.exists());
  // after offline compaction the RVV is reset and only 3 update-with-key records remain,
  // i.e. key0, key1, and key2 (tombstone) in _3.crf
  crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] {1}, false);
  int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
  tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key2");
  assertEquals(crfsize + updatesize0 + updatesize1 + tombstonesize1, crf3File.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {0}, true), drf3File.length());
  // Space is now preallocated for if files as well as crfs and drfs, so the check below
  // no longer holds:
  // if (originalIfLength <= ifFile.length()) {
  //   fail("expected " + ifFile.length() + " to be < " + originalIfLength);
  // }

  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(2, r.size());
  assertEquals("value0", r.get("key0"));
  assertEquals("update1", r.get("key1"));
  // test passed; clean up files
  r.destroyRegion();
  diskStore.destroy();
}
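The oplog file names asserted throughout these tests all follow the pattern BACKUP&lt;storeName&gt;_&lt;oplogId&gt;.&lt;crf|drf|krf&gt; in the store's first disk directory, with the init file named BACKUP&lt;storeName&gt;.if. A hypothetical helper (not part of the Geode test; the name oplogFile is an assumption) that captures the convention:

// Hypothetical helper: build the path of an oplog file for a given store and oplog id.
// The naming pattern is taken directly from the assertions in the tests above.
private static File oplogFile(DiskStore store, int oplogId, String extension) {
  return new File(store.getDiskDirs()[0],
      "BACKUP" + store.getName() + "_" + oplogId + "." + extension);
}

With it, the file setup reduces to calls like oplogFile(diskStore, 1, "crf").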
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskOfflineCompactionJUnitTest method testTwoEntriesNoCompact.
@Test
public void testTwoEntriesNoCompact() throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testTwoEntriesNoCompact";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry =
      InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
  r.put("key1", "value1");
  r.put("key2", "value2");
  cache.close();
  ds.disconnect();
  System.out.println("empty rvv size = " + getRVVSize(0, null, false));
  System.out.println("empty rvvgc size = " + getRVVSize(1, new int[] {0}, true));
  System.out.println("1 member rvv size = " + getRVVSize(1, new int[] {1}, false));
  System.out.println("2 member rvv size = " + getRVVSize(2, new int[] {1, 1}, false));
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());

  int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
  int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false)
      + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
  assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true),
      drfFile.length());
  long originalIfLength = ifFile.length();

  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(0, dsi.getDeadRecordCount());
  assertEquals(2, dsi.getLiveEntryCount());
  assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(0, new int[] {0}, true), drfFile.length());
  // offline compaction should not have created a new oplog
  assertEquals(originalIfLength, ifFile.length());

  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(2, r.size());
  assertEquals("value1", r.get("key1"));
  assertEquals("value2", r.get("key2"));
  // test passed; clean up files
  r.destroyRegion();
  diskStore.destroy();
}
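Stripped of the size arithmetic, the offline-compaction flow these tests exercise is short. A sketch under the assumption (inferred from the calls above, not confirmed against the Geode javadoc) that the last two offlineCompact arguments are an upgrade flag and a max oplog size, with -1 keeping the existing size; cache and ds are the same test fixture fields used above:

// Offline compaction requires the cache to be closed and the member disconnected first.
cache.close();
ds.disconnect();
File[] dirs = diskStore.getDiskDirs();
DiskStoreImpl.validate(name, dirs); // sanity-check the store while offline
// Assumption: false = no version upgrade, -1 = keep the existing max oplog size.
DiskStoreImpl compacted = DiskStoreImpl.offlineCompact(name, dirs, false, -1);
System.out.println("dead records: " + compacted.getDeadRecordCount()
    + ", live entries: " + compacted.getLiveEntryCount());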
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskOfflineCompactionJUnitTest method testTwoEntriesWithUpdateAndDestroy.
@Test
public void testTwoEntriesWithUpdateAndDestroy() throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testTwoEntriesWithUpdateAndDestroy";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry =
      InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
  r.put("key1", "value1");
  r.put("key2", "value2");
  r.put("key1", "update1");
  r.put("key2", "update2");
  r.remove("key2");
  cache.close();
  ds.disconnect();
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());

  int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
  int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false)
      + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
  int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
  int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
  // one tombstone without key
  int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
  assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1,
      crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true),
      drfFile.length());
  long originalIfLength = ifFile.length();

  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(3, dsi.getDeadRecordCount());
  assertEquals(2, dsi.getLiveEntryCount());
  assertEquals(false, crfFile.exists());
  assertEquals(false, drfFile.exists());
  assertEquals(false, krfFile.exists());
  crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
  drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
  krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
  assertEquals(true, krfFile.exists());
  assertEquals(true, crfFile.exists());
  assertEquals(true, drfFile.exists());
  // compare file sizes: after offline compaction only one update-with-key and one
  // tombstone-with-key are left, there is no more OPLOG_NEW_ENTRY_BASE_REC, and
  // the crf now contains an RVV with one entry
  crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] {1}, false);
  updatesize1 += getStrSizeInOplog("key1");
  tombstonesize1 += getStrSizeInOplog("key2");
  assertEquals(crfsize + updatesize1 + tombstonesize1, crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {0}, true), drfFile.length());
  assertEquals(originalIfLength, ifFile.length());

  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(1, r.size());
  assertEquals("update1", r.get("key1"));
  // test passed; clean up files
  r.destroyRegion();
  diskStore.destroy();
}
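One detail worth calling out: before compaction the update and tombstone records were written without keys (the create records earlier in the same oplog already carried them), while the records that survive compaction must carry their keys. The test expresses this by growing the expected sizes in place; written out with the test's own helper methods, the arithmetic is:

// Expected size of a surviving record grows by the serialized key size.
// getSize4UpdateWithoutKey, getSize4TombstoneWithoutKey, and getStrSizeInOplog are the
// helpers defined by this test class and used in the assertions above.
int compactedUpdateSize = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1")
    + getStrSizeInOplog("key1");
int compactedTombstoneSize = getSize4TombstoneWithoutKey(extra_byte_num_per_entry)
    + getStrSizeInOplog("key2");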
use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
the class DiskRegionIllegalArguementsJUnitTest method testTimeInterval.
@Test
public void testTimeInterval() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  try {
    dsf.setTimeInterval(-1);
    fail("expected IllegalArgumentException");
  } catch (IllegalArgumentException expected) {
    // a negative time interval must be rejected
  }
  dsf.setTimeInterval(1);
  assertEquals(1, dsf.create("test").getTimeInterval());
}
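The try/fail/catch idiom above is the classic JUnit way to assert an exception. If the project's JUnit version provides assertThrows (an assumption about the test dependency; it arrived in JUnit 4.13 and JUnit 5), the same check reads more compactly:

// Equivalent check using assertThrows, assuming JUnit 4.13+ or JUnit 5 is available.
DiskStoreFactory dsf = cache.createDiskStoreFactory();
assertThrows(IllegalArgumentException.class, () -> dsf.setTimeInterval(-1));
dsf.setTimeInterval(1);
assertEquals(1, dsf.create("test").getTimeInterval());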