Use of org.apache.geode.cache.DiskStore in project geode by apache — class DiskOfflineCompactionJUnitTest, method testForceRollTwoEntriesWithUpdates.
// Verifies oplog file sizes across a forceRoll plus an offline compaction:
// three creates land in oplog1, a roll starts oplog2, two updates land there,
// then offline compaction must drop oplog1 entirely (its only live record,
// key0, is rewritten into a new oplog3 as an update-with-key) while leaving
// oplog2 untouched. All assertions are byte-exact file lengths.
@Test
public void testForceRollTwoEntriesWithUpdates() throws Exception {
DiskStoreFactory dsf = cache.createDiskStoreFactory();
// manual compaction only, so the byte-exact size assertions below hold
dsf.setAutoCompact(false);
String name = "testForceRollTwoEntriesWithUpdates";
DiskStore diskStore = dsf.create(name);
// oplog1 files (created before forceRoll) and oplog2 files (after it)
File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
AttributesFactory af = new AttributesFactory();
af.setDiskStoreName(name);
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
Region r = cache.createRegion("r", af.create());
// per-entry overhead for the TS/DSID bytes (see calculateBytesForTSandDSID)
int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
// extra key to keep oplog1 from being empty
r.put("key0", "value0");
r.put("key1", "value1");
r.put("key2", "value2");
// switch to oplog2; the puts below become update records there
diskStore.forceRoll();
r.put("key1", "update1");
r.put("key2", "update2");
cache.close();
ds.disconnect();
DiskStoreImpl.validate(name, diskStore.getDiskDirs());
int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
// oplog1 crf: header + version record + empty RVV + new-entry base record
int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");
assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
// oplog2 crf has a 1-member RVV and no new-entry base record (updates only)
crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
// the .if (init file) length must survive compaction unchanged (checked below)
long originalIfLength = ifFile.length();
DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
// 2 dead records: the shadowed creates of key1/key2; 3 live: key0..key2
assertEquals(2, dsi.getDeadRecordCount());
assertEquals(3, dsi.getLiveEntryCount());
// oplog1 is fully compacted away
assertEquals(false, crfFile.exists());
assertEquals(false, drfFile.exists());
assertEquals(false, krfFile.exists());
// oplog2 contains two updates so it remains unchanged
assertEquals(crfsize + updatesize1 + updatesize2, crf2File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
assertEquals(true, krf3File.exists());
assertEquals(true, crf3File.exists());
assertEquals(true, drf3File.exists());
// after offline compaction, rvv is reset, and only one update-with-key, i.e. key0 in _3.crf
crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
assertEquals(crfsize + updatesize0, crf3File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf3File.length());
assertEquals(originalIfLength, ifFile.length());
// reconnect and recover: all three keys must come back with latest values
connectDSandCache();
dsf = cache.createDiskStoreFactory();
diskStore = dsf.create(name);
af = new AttributesFactory();
af.setDiskStoreName(name);
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
r = cache.createRegion("r", af.create());
assertEquals(3, r.size());
assertEquals("value0", r.get("key0"));
assertEquals("update1", r.get("key1"));
assertEquals("update2", r.get("key2"));
// if test passed clean up files
r.destroyRegion();
diskStore.destroy();
}
Use of org.apache.geode.cache.DiskStore in project geode by apache — class DiskOfflineCompactionJUnitTest, method testbug41862.
// Regression test for bug 41862: a create in oplog1 followed by a remove in
// oplog2 leaves a tombstone; offline compaction must drop the dead create
// (oplog1 disappears, its live entry moving to a new oplog3) while leaving
// oplog2 — which holds live records — byte-for-byte unchanged.
@Test
public void testbug41862() throws Exception {
DiskStoreFactory dsf = cache.createDiskStoreFactory();
// manual compaction only, so the byte-exact size assertions below hold
dsf.setAutoCompact(false);
String name = "testbug41862";
DiskStore diskStore = dsf.create(name);
File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
AttributesFactory af = new AttributesFactory();
af.setDiskStoreName(name);
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
Region r = cache.createRegion("r", af.create());
// per-entry overhead for the TS/DSID bytes (see calculateBytesForTSandDSID)
int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
r.create("key1", "value1");
// to keep this oplog from going empty
r.create("key2", "value2");
// roll to oplog2; key3's create and key1's tombstone land there
((LocalRegion) r).getDiskStore().forceRoll();
r.create("key3", "value3");
r.remove("key1");
cache.close();
ds.disconnect();
DiskStoreImpl.validate(name, diskStore.getDiskDirs());
int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
// oplog1 crf: header + version record + empty RVV + new-entry base record
int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
int createsize3 = getSize4Create(extra_byte_num_per_entry, "key3", "value3");
// 1 tombstone with key
int tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key1");
assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
// adjust rvv: oplog2's RVV covers one member with one version, not an empty RVV
crfsize += (getRVVSize(1, new int[] { 1 }, false) - getRVVSize(0, null, false));
// oplog2 holds key3's create plus key1's tombstone
assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
// the .if (init file) length must survive compaction unchanged (checked below)
long originalIfLength = ifFile.length();
DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
// 1 dead record: key1's create shadowed by its tombstone; live: key2, key3, tombstone
assertEquals(1, dsi.getDeadRecordCount());
assertEquals(3, dsi.getLiveEntryCount());
// oplog1 is fully compacted away
assertEquals(false, crfFile.exists());
assertEquals(false, drfFile.exists());
assertEquals(false, krfFile.exists());
// offline compaction did not change _2.crf and _2.drf
assertEquals(crfsize + createsize3 + tombstonesize1, crf2File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf2File.length());
// offline compaction reset rvv to be empty, create-entry becomes one update-with-key-entry in
// _3.crf,
// since there's no creates, then there's no OPLOG_NEW_ENTRY_BASE_REC_SIZE
crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key3", "value3");
File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
assertEquals(true, krf3File.exists());
assertEquals(true, crf3File.exists());
assertEquals(true, drf3File.exists());
assertEquals(crfsize + updatesize1, crf3File.length());
assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drf3File.length());
assertEquals(originalIfLength, ifFile.length());
// reconnect and recover: key1 must stay removed, key2/key3 must come back
connectDSandCache();
dsf = cache.createDiskStoreFactory();
diskStore = dsf.create(name);
af = new AttributesFactory();
af.setDiskStoreName(name);
af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
r = cache.createRegion("r", af.create());
assertEquals(2, r.size());
assertEquals("value2", r.get("key2"));
assertEquals("value3", r.get("key3"));
// if test passed clean up files
r.destroyRegion();
diskStore.destroy();
}
Use of org.apache.geode.cache.DiskStore in project geode by apache — class DiskRegCacheXmlJUnitTest, method testDiskRegCacheXml.
/**
 * Verifies that every region declared in the cache.xml picks up its expected
 * disk-store configuration: the region's disk-synchronous flag plus the
 * store's auto-compact, max-oplog-size, queue-size and time-interval values.
 *
 * FIX: the failure messages used to concatenate a {@code File[]} directly
 * ("actual=" + ds.getDiskDirs()), which prints the array reference
 * (e.g. "[Ljava.io.File;@1a2b3c") instead of its contents; they now use
 * {@code Arrays.toString}. The repeated per-region assertion boilerplate is
 * factored into two private helpers.
 */
@Test
public void testDiskRegCacheXml() throws Exception {
  mkDirAndConnectDs();
  // Region 1 additionally verifies the configured disk directories.
  Region region1 = cache.getRegion("/root1/PersistSynchRollingOplog1");
  DiskStore ds = ((LocalRegion) region1).getDiskStore();
  if (ds != null) {
    if (!Arrays.equals(dirs, ds.getDiskDirs())) {
      fail("expected=" + Arrays.toString(dirs)
          + " actual=" + Arrays.toString(ds.getDiskDirs()));
    }
  } else {
    if (!Arrays.equals(dirs, region1.getAttributes().getDiskDirs())) {
      fail("expected=" + Arrays.toString(dirs)
          + " actual=" + Arrays.toString(region1.getAttributes().getDiskDirs()));
    }
  }
  DiskStore ds1 = checkRegionDiskStore("/root1/PersistSynchRollingOplog1", true);
  checkDiskStoreConfig(ds1, true, 2L, null, null);
  DiskStore ds2 = checkRegionDiskStore("/root2/PersistSynchFixedOplog2", true);
  checkDiskStoreConfig(ds2, false, 0L, null, null);
  DiskStore ds3 = checkRegionDiskStore("/root3/PersistASynchBufferRollingOplog3", false);
  checkDiskStoreConfig(ds3, true, 2L, 10000, 15L);
  DiskStore ds4 = checkRegionDiskStore("/root4/PersistASynchNoBufferFixedOplog4", false);
  checkDiskStoreConfig(ds4, false, 2L, 0, null);
  DiskStore ds5 = checkRegionDiskStore("/root5/OverflowSynchRollingOplog5", true);
  checkDiskStoreConfig(ds5, true, 2L, null, null);
  DiskStore ds6 = checkRegionDiskStore("/root6/OverflowSynchFixedOplog6", true);
  checkDiskStoreConfig(ds6, false, 0L, null, null);
  DiskStore ds7 = checkRegionDiskStore("/root7/OverflowASynchBufferRollingOplog7", false);
  checkDiskStoreConfig(ds7, true, 2L, null, null);
  DiskStore ds8 = checkRegionDiskStore("/root8/OverflowASynchNoBufferFixedOplog8", false);
  checkDiskStoreConfig(ds8, false, null, null, null);
  DiskStore ds9 = checkRegionDiskStore("/root9/PersistOverflowSynchRollingOplog9", true);
  checkDiskStoreConfig(ds9, true, 2L, null, null);
  DiskStore ds10 = checkRegionDiskStore("/root10/PersistOverflowSynchFixedOplog10", true);
  checkDiskStoreConfig(ds10, false, null, null, null);
  DiskStore ds11 = checkRegionDiskStore("/root11/PersistOverflowASynchBufferRollingOplog11", false);
  checkDiskStoreConfig(ds11, true, 2L, null, null);
  DiskStore ds12 = checkRegionDiskStore("/root12/PersistOverflowASynchNoBufferFixedOplog12", false);
  checkDiskStoreConfig(ds12, null, null, 0, 15L);
  deleteFiles();
}

/**
 * Looks up the region at {@code regionPath}, asserts its disk-synchronous
 * flag matches {@code expectedSynchronous}, and returns its (non-null)
 * DiskStore resolved through the cache.
 */
private DiskStore checkRegionDiskStore(String regionPath, boolean expectedSynchronous) {
  Region region = cache.getRegion(regionPath);
  RegionAttributes ra = ((LocalRegion) region).getAttributes();
  assertTrue(ra.isDiskSynchronous() == expectedSynchronous);
  DiskStore store = cache.findDiskStore(((LocalRegion) region).getDiskStoreName());
  assertTrue(store != null);
  return store;
}

/**
 * Asserts each non-null expectation against the disk store's configuration;
 * a null parameter means "not checked for this region".
 */
private static void checkDiskStoreConfig(DiskStore store, Boolean autoCompact,
    Long maxOplogSize, Integer queueSize, Long timeInterval) {
  if (autoCompact != null) {
    assertTrue(store.getAutoCompact() == autoCompact.booleanValue());
  }
  if (maxOplogSize != null) {
    assertTrue(store.getMaxOplogSize() == maxOplogSize.longValue());
  }
  if (queueSize != null) {
    assertTrue(store.getQueueSize() == queueSize.intValue());
  }
  if (timeInterval != null) {
    assertTrue(store.getTimeInterval() == timeInterval.longValue());
  }
}
Use of org.apache.geode.cache.DiskStore in project geode by apache — class DiskRegOplogSwtchingAndRollerJUnitTest, method testAsyncPersistRegionDAExp.
// end of testSyncPersistRegionDAExp
/**
 * Verifies that an async persist-only region reports the disk-directory size
 * configured via DiskRegionProperties (2048 bytes), then exercises the
 * disk-access-exception helper against it.
 *
 * NOTE(review): relies on the fields dirs1/diskDirSize1/diskProps/region/cache
 * declared elsewhere in this class.
 *
 * FIX: the original asserted {@code assertEquals(1, diskDirSize1.length)} on
 * the locally-built input array (trivially true); the intent — per the
 * adjacent commented-out line and the fetch of {@code ds.getDiskDirSizes()} —
 * was to assert on the sizes reported back by the disk store. Also resets
 * ISSUE_CALLBACKS_TO_CACHE_OBSERVER in a finally block so a failing assertion
 * cannot leak the flag into later tests.
 */
@Test
public void testAsyncPersistRegionDAExp() {
  File testingDirectory1 = new File("testingDirectory1");
  testingDirectory1.mkdir();
  testingDirectory1.deleteOnExit();
  File file1 = new File("testingDirectory1/" + "testAsyncPersistRegionDAExp" + "1");
  file1.mkdir();
  file1.deleteOnExit();
  dirs1 = new File[1];
  dirs1[0] = file1;
  diskDirSize1 = new int[1];
  diskDirSize1[0] = 2048; // deliberately small directory budget
  diskProps.setDiskDirsAndSizes(dirs1, diskDirSize1);
  diskProps.setPersistBackup(true);
  diskProps.setRolling(true);
  diskProps.setMaxOplogSize(100000000);
  diskProps.setRegionName("region_AsyncPersistRegionDAExp");
  region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, diskProps);
  LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
  try {
    DiskStore ds = cache.findDiskStore(((LocalRegion) region).getDiskStoreName());
    assertTrue(ds != null);
    // assert on the sizes the store reports, not the array we just built
    int[] diskSizes1 = ds.getDiskDirSizes();
    assertEquals(1, diskSizes1.length);
    assertTrue("diskSizes != 2048 ", diskSizes1[0] == 2048);
    this.diskAccessExpHelpermethod(region);
    // closes disk file which will flush all buffers
    closeDown();
  } finally {
    // always restore the observer flag so later tests are unaffected
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
  }
}
Use of org.apache.geode.cache.DiskStore in project geode by apache — class PersistentRecoveryOrderDUnitTest, method createPersistentRegionAsync.
/**
 * Asynchronously creates the persistent replicated region named REGION_NAME
 * on the given VM, backed by a freshly created disk store rooted in that
 * VM's disk directory.
 *
 * @param vm the member VM on which the region is created
 * @param diskSynchronous whether region writes flush to disk synchronously
 * @return the AsyncInvocation tracking the remote region creation
 */
protected AsyncInvocation createPersistentRegionAsync(final VM vm, final boolean diskSynchronous) {
  return vm.invokeAsync(new SerializableRunnable("Create persistent region") {
    @Override
    public void run() {
      Cache cache = getCache();
      File diskDir = getDiskDirForVM(vm);
      diskDir.mkdirs();
      DiskStoreFactory storeFactory = cache.createDiskStoreFactory();
      storeFactory.setDiskDirs(new File[] { diskDir });
      storeFactory.setMaxOplogSize(1); // tiny oplogs force frequent rolls
      DiskStore store = storeFactory.create(REGION_NAME);
      RegionFactory regionFactory = new RegionFactory();
      regionFactory.setScope(Scope.DISTRIBUTED_ACK);
      regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
      regionFactory.setDiskSynchronous(diskSynchronous);
      regionFactory.setDiskStoreName(store.getName());
      regionFactory.create(REGION_NAME);
    }
  });
}
Aggregations