Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
The class DiskStoreFactoryJUnitTest, method testFlush.
@Test
public void testFlush() {
  // Create an empty disk store and verify that flushing it completes without error.
  DiskStoreFactory factory = cache.createDiskStoreFactory();
  DiskStore store = factory.create("testFlush");
  store.flush();
}
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
The class DiskStoreFactoryJUnitTest, method testDestroyWithClosedRegion.
@Test
public void testDestroyWithClosedRegion() {
  DiskStoreFactory factory = cache.createDiskStoreFactory();
  DiskStore store = factory.create("testDestroy");
  // A persistent region bound to the store keeps it in use while the region is open.
  Region region = cache
      .createRegionFactory(RegionShortcut.LOCAL_PERSISTENT)
      .setDiskStoreName("testDestroy")
      .create("region");
  region.close();
  // With the region closed the store has no remaining users, so destroy must succeed.
  store.destroy();
}
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
The class OplogJUnitTest, method testPreblowErrorCondition.
/**
 * Tests that preblowing (preallocating) an oplog file larger than the available disk space
 * fails with an IOException that the disk store recovers from without problem: a subsequent
 * put and an on-disk read of that value must still succeed.
 */
// Now we preallocate spaces for if files and also crfs and drfs. So the below test is not valid
// any more. See revision: r42359 and r42320. So disabling this test.
@Ignore("TODO: test is disabled")
@Test
public void testPreblowErrorCondition() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  // An absurdly large max oplog size guarantees the preblow attempt cannot be satisfied.
  ((DiskStoreFactoryImpl) dsf).setMaxOplogSizeInBytes(100000000L * 1024L * 1024L * 1024L);
  dsf.setAutoCompact(false);
  File dir = new File("testingDirectoryDefault");
  dir.mkdir();
  dir.deleteOnExit();
  File[] dirs = { dir };
  int[] size = new int[] { Integer.MAX_VALUE };
  dsf.setDiskDirsAndSizes(dirs, size);
  AttributesFactory factory = new AttributesFactory();
  logWriter.info("<ExpectedException action=add>" + "Could not pregrow" + "</ExpectedException>");
  try {
    DiskStore ds = dsf.create("test");
    factory.setDiskStoreName(ds.getName());
    factory.setDiskSynchronous(true);
    factory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    factory.setScope(Scope.LOCAL);
    try {
      region = cache.createVMRegion("test", factory.createRegionAttributes());
    } catch (Exception e1) {
      logWriter.error("Test failed due to exception", e1);
      fail("Test failed due to exception " + e1);
    }
    region.put("key1", new byte[900]);
    byte[] val = null;
    try {
      val = (byte[]) ((LocalRegion) region).getValueOnDisk("key1");
    } catch (Exception e) {
      // Log through the cache's log writer (not printStackTrace) so the stack trace
      // appears in the test output, then fail with the exception details.
      logWriter.error("Unexpected exception reading value from disk", e);
      fail("Unexpected exception reading value from disk: " + e);
    }
    // The value written after the preblow failure must be fully readable from disk.
    assertTrue(val.length == 900);
  } finally {
    logWriter.info("<ExpectedException action=remove>" + "Could not pregrow" + "</ExpectedException>");
  }
  closeDown();
}
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
The class PersistentReplicatedTestBase, method createPersistentRegionWithoutCompaction.
protected SerializableRunnable createPersistentRegionWithoutCompaction(final VM vm0) {
  SerializableRunnable createRegion = new SerializableRunnable("Create persistent region") {
    public void run() {
      Cache cache = getCache();
      // Disk store: tiny oplogs, automatic compaction off, manual compaction allowed.
      DiskStoreFactory storeFactory = cache.createDiskStoreFactory();
      File diskDir = getDiskDirForVM(vm0);
      diskDir.mkdirs();
      storeFactory.setDiskDirs(new File[] { diskDir });
      storeFactory.setMaxOplogSize(1);
      storeFactory.setAutoCompact(false);
      storeFactory.setAllowForceCompaction(true);
      storeFactory.setCompactionThreshold(20);
      DiskStore store = storeFactory.create(REGION_NAME);
      // Synchronous persistent replicated region backed by the store above.
      RegionFactory regionFactory = new RegionFactory();
      regionFactory.setDiskStoreName(store.getName());
      regionFactory.setDiskSynchronous(true);
      regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
      regionFactory.setScope(Scope.DISTRIBUTED_ACK);
      regionFactory.create(REGION_NAME);
    }
  };
  vm0.invoke(createRegion);
  return createRegion;
}
Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.
The class RollingUpgrade2DUnitTest, method createPersistentReplicateRegion.
public static void createPersistentReplicateRegion(GemFireCache cache, String regionName, File diskStore) throws Exception {
  // Create the shared "store" disk store once; later calls reuse the existing one.
  if (cache.findDiskStore("store") == null) {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setMaxOplogSize(1L);
    dsf.setDiskDirs(new File[] { diskStore.getAbsoluteFile() });
    dsf.create("store");
  }
  // Persistent replicated region backed by that disk store.
  RegionFactory regionFactory = ((GemFireCacheImpl) cache).createRegionFactory();
  regionFactory.setDiskStoreName("store");
  regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  regionFactory.create(regionName);
}
Aggregations