Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DistributedTransactionDUnitTest, method getPersistentPRAttributes.
protected RegionAttributes getPersistentPRAttributes(final int redundancy, final int recoveryDelay,
    Cache cache, int numBuckets, boolean synchronous) {
  // Create the "disk" disk store on first use so the attributes below can reference it by name.
  DiskStore ds = cache.findDiskStore("disk");
  if (ds == null) {
    ds = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
  }
  AttributesFactory af = new AttributesFactory();
  PartitionAttributesFactory paf = new PartitionAttributesFactory();
  paf.setRedundantCopies(redundancy);
  paf.setRecoveryDelay(recoveryDelay);
  paf.setTotalNumBuckets(numBuckets);
  paf.setLocalMaxMemory(500);
  af.setPartitionAttributes(paf.create());
  af.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
  af.setDiskStoreName("disk");
  af.setDiskSynchronous(synchronous);
  return af.create();
}
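A minimal, hypothetical call site for the helper above (the cache field, the parameter values, and the region name "persistentPR" are illustrative assumptions, not part of the original test):

  RegionAttributes attrs = getPersistentPRAttributes(1, -1, cache, 113, true);
  Region region = cache.createRegionFactory(attrs).create("persistentPR");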
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DiskStoreFactoryJUnitTest, method testForceRoll.
@Test
public void testForceRoll() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  String name = "testForceRoll";
  DiskStore ds = dsf.create(name);
  // On a freshly created, empty disk store the call should simply complete without throwing.
  ds.forceRoll();
}
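forceRoll closes the current operation log and starts a new one, which matters once a persistent region has written data. A slightly fuller sketch, assuming a running cache (the store and region names here are hypothetical, not from the original test):

  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  DiskStore store = dsf.create("rollDemo");
  Region<String, String> r = cache.<String, String>createRegionFactory(RegionShortcut.LOCAL_PERSISTENT)
      .setDiskStoreName("rollDemo")
      .create("rollDemoRegion");
  r.put("k", "v"); // ensure the current oplog has something in it
  store.forceRoll(); // subsequent writes go to a fresh oplog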
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DiskRegionTestingBase, method tearDown.
@After
public final void tearDown() throws Exception {
  preTearDown();
  try {
    if (cache != null && !cache.isClosed()) {
      for (Iterator itr = cache.rootRegions().iterator(); itr.hasNext();) {
        Region root = (Region) itr.next();
        if (root.isDestroyed() || root instanceof HARegion) {
          continue;
        }
        try {
          logWriter.info("<ExpectedException action=add>RegionDestroyedException</ExpectedException>");
          root.localDestroyRegion("teardown");
          logWriter.info("<ExpectedException action=remove>RegionDestroyedException</ExpectedException>");
        } catch (RegionDestroyedException ignore) {
          // already destroyed; nothing to clean up
        }
      }
    }
    // Wait for every disk store, including region-owned ones, to finish closing
    // before the cache itself is closed.
    for (DiskStore dstore : ((InternalCache) cache).listDiskStoresIncludingRegionOwned()) {
      ((DiskStoreImpl) dstore).waitForClose();
    }
  } finally {
    closeCache();
  }
  ds.disconnect();
  // Deleting the files here is not strictly needed, but is kept as a safety net.
  deleteFiles();
  DiskStoreImpl.SET_IGNORE_PREALLOCATE = false;
  postTearDown();
}
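The teardown relies on a closeCache() helper defined elsewhere in the base class. A hedged sketch of what such a helper might look like, under the assumption that it only needs to close a still-open cache (the actual DiskRegionTestingBase implementation may differ):

  protected void closeCache() {
    if (cache != null && !cache.isClosed()) {
      cache.close();
    }
  }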
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DiskRegionJUnitTest, method testOperationGreaterThanMaxDirSize.
/**
 * Tests an operation whose size exceeds the maximum directory size, given that the
 * max dir size constraint has been relaxed.
 */
@Test
public void testOperationGreaterThanMaxDirSize() throws Exception {
  putsHaveStarted = false;
  DiskRegionProperties diskRegionProperties = new DiskRegionProperties();
  diskRegionProperties.setRegionName("IGNORE_EXCEPTION_testOperationGreaterThanMaxDirSize");
  // Each of the four directories is capped at 1025 bytes, so a 1026-byte value cannot fit in any of them.
  int[] dirSizes = { 1025, 1025, 1025, 1025 };
  diskRegionProperties.setDiskDirsAndSizes(dirs, dirSizes);
  diskRegionProperties.setMaxOplogSize(600);
  diskRegionProperties.setRolling(false);
  Region region =
      DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskRegionProperties, Scope.LOCAL);
  DiskStore ds = ((LocalRegion) region).getDiskStore();
  if (!Arrays.equals(dirSizes, ds.getDiskDirSizes())) {
    fail("expected=" + Arrays.toString(dirSizes) + " actual=" + Arrays.toString(ds.getDiskDirSizes()));
  }
  Puts puts = new Puts(region, 1026);
  puts.performPuts();
  if (!puts.exceptionOccurred()) {
    fail("an exception was supposed to occur but did not");
  }
  if (puts.putSuccessful(0)) {
    fail("first put succeeded when it was not supposed to");
  }
  if (puts.putSuccessful(1)) {
    fail("second put succeeded when it was not supposed to");
  }
  if (puts.putSuccessful(2)) {
    fail("third put succeeded when it was not supposed to");
  }
  // If the exception occurred, the region's disk store should already be closing.
  ((LocalRegion) region).getDiskStore().waitForClose();
}
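Puts is a helper class defined by the test itself. A hypothetical sketch of its shape, just enough to make the assertions above read clearly (the real class carries additional bookkeeping and synchronization):

  class Puts {
    private final Region region;
    private final int valueSize;
    private final boolean[] succeeded = new boolean[3];
    private volatile boolean exception;

    Puts(Region region, int valueSize) {
      this.region = region;
      this.valueSize = valueSize;
    }

    void performPuts() {
      for (int i = 0; i < 3; i++) {
        try {
          region.put("key" + i, new byte[valueSize]); // each value exceeds every directory's capacity
          succeeded[i] = true;
        } catch (RuntimeException e) {
          exception = true; // expected here: no directory can hold a 1026-byte value
        }
      }
    }

    boolean exceptionOccurred() {
      return exception;
    }

    boolean putSuccessful(int i) {
      return succeeded[i];
    }
  }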
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DiskStoreFactoryJUnitTest, method testGetDefaultInstance.
/**
 * Verifies that a DiskStore created from a DiskStoreFactory with no explicit settings
 * reports the documented default attribute values.
 */
@Test
public void testGetDefaultInstance() {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  String name = "testGetDefaultInstance";
  assertNull(cache.findDiskStore(name));
  DiskStore ds = dsf.create(name);
  assertEquals(ds, cache.findDiskStore(name));
  assertEquals(name, ds.getName());
  assertEquals(DiskStoreFactory.DEFAULT_AUTO_COMPACT, ds.getAutoCompact());
  assertEquals(DiskStoreFactory.DEFAULT_COMPACTION_THRESHOLD, ds.getCompactionThreshold());
  assertEquals(DiskStoreFactory.DEFAULT_ALLOW_FORCE_COMPACTION, ds.getAllowForceCompaction());
  assertEquals(DiskStoreFactory.DEFAULT_MAX_OPLOG_SIZE, ds.getMaxOplogSize());
  assertEquals(DiskStoreFactory.DEFAULT_TIME_INTERVAL, ds.getTimeInterval());
  assertEquals(DiskStoreFactory.DEFAULT_WRITE_BUFFER_SIZE, ds.getWriteBufferSize());
  assertEquals(DiskStoreFactory.DEFAULT_QUEUE_SIZE, ds.getQueueSize());
  if (!Arrays.equals(DiskStoreFactory.DEFAULT_DISK_DIRS, ds.getDiskDirs())) {
    fail("expected=" + Arrays.toString(DiskStoreFactory.DEFAULT_DISK_DIRS) + " had=" + Arrays.toString(ds.getDiskDirs()));
  }
  if (!Arrays.equals(DiskStoreFactory.DEFAULT_DISK_DIR_SIZES, ds.getDiskDirSizes())) {
    fail("expected=" + Arrays.toString(DiskStoreFactory.DEFAULT_DISK_DIR_SIZES) + " had=" + Arrays.toString(ds.getDiskDirSizes()));
  }
}
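A companion sketch, not part of the original test, showing that a non-default factory setting survives creation (the store name is illustrative):

  DiskStoreFactory factory = cache.createDiskStoreFactory();
  factory.setAutoCompact(!DiskStoreFactory.DEFAULT_AUTO_COMPACT);
  DiskStore store = factory.create("testNonDefaultAutoCompact");
  assertEquals(!DiskStoreFactory.DEFAULT_AUTO_COMPACT, store.getAutoCompact());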