Example usage of org.apache.geode.cache.DiskStore in the Apache Geode project, from the class DiskManagementDUnitTest, method createPersistentRegionAsync.
/**
 * Asynchronously creates a persistent replicate region, backed by its own disk store,
 * in the given member VM.
 *
 * <p>The disk store writes to a per-process directory (named after the member's PID)
 * under {@code diskDir}, uses a 1&nbsp;MB max oplog size, and disables auto-compaction
 * while allowing force compaction, so tests can drive compaction explicitly.
 *
 * @param memberVM the member VM in which to create the region
 * @return the async invocation handle for the region-creation task
 */
private AsyncInvocation createPersistentRegionAsync(final VM memberVM) {
  return memberVM.invokeAsync("createPersistentRegionAsync", () -> {
    // One disk directory per process so concurrent member VMs don't collide.
    // NOTE(review): assumes this directory exists or is created elsewhere —
    // confirm, since disk store creation requires the disk dirs to exist.
    File dir = new File(diskDir, String.valueOf(ProcessUtils.identifyPid()));

    Cache cache = this.managementTestRule.getCache();

    DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory();
    diskStoreFactory.setDiskDirs(new File[] { dir });
    diskStoreFactory.setMaxOplogSize(1); // tiny oplogs make rolls/compaction easy to trigger
    diskStoreFactory.setAllowForceCompaction(true);
    diskStoreFactory.setAutoCompact(false); // compaction is driven explicitly by the tests
    DiskStore diskStore = diskStoreFactory.create(REGION_NAME);

    // Parameterized factory type instead of the raw RegionFactory.
    RegionFactory<Object, Object> regionFactory = cache.createRegionFactory();
    regionFactory.setDiskStoreName(diskStore.getName());
    regionFactory.setDiskSynchronous(true);
    regionFactory.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    regionFactory.setScope(Scope.DISTRIBUTED_ACK);
    regionFactory.create(REGION_NAME);
  });
}
Example usage of org.apache.geode.cache.DiskStore in the Apache Geode project, from the class DiskManagementDUnitTest, method invokeForceCompaction.
/**
* Invokes force compaction on disk store by MBean interface
*/
/**
 * Invokes force compaction on a freshly created disk store through its
 * {@code DiskStoreMXBean}, verifying the MBean is registered locally and that
 * compaction reports nothing to do on an empty store.
 *
 * @param memberVM the member VM in which to run the check
 */
private void invokeForceCompaction(final VM memberVM) {
  memberVM.invoke("invokeForceCompaction", () -> {
    String storeName = "testForceCompaction_" + ProcessUtils.identifyPid();

    Cache cache = this.managementTestRule.getCache();
    DiskStoreFactory factory = cache.createDiskStoreFactory();
    factory.setAllowForceCompaction(true);
    DiskStore store = factory.create(storeName);

    ManagementService managementService = this.managementTestRule.getManagementService();
    DiskStoreMXBean mxBean = managementService.getLocalDiskStoreMBean(storeName);

    assertThat(mxBean).isNotNull();
    assertThat(mxBean.getName()).isEqualTo(store.getName());
    // Nothing has been written to the store, so compaction has no work to perform.
    assertThat(mxBean.forceCompaction()).isFalse();
  });
}
Example usage of org.apache.geode.cache.DiskStore in the Apache Geode project, from the class DiskManagementDUnitTest, method invokeForceRoll.
/**
* Invokes force roll on disk store by MBean interface
*/
/**
 * Invokes force roll on a freshly created disk store through its
 * {@code DiskStoreMXBean}, verifying the MBean is registered locally first.
 *
 * @param memberVM the member VM in which to run the check
 */
private void invokeForceRoll(final VM memberVM) {
  memberVM.invoke("invokeForceRoll", () -> {
    String storeName = "testForceRoll_" + ProcessUtils.identifyPid();

    Cache cache = this.managementTestRule.getCache();
    DiskStore store = cache.createDiskStoreFactory().create(storeName);

    ManagementService managementService = this.managementTestRule.getManagementService();
    DiskStoreMXBean mxBean = managementService.getLocalDiskStoreMBean(storeName);

    assertThat(mxBean).isNotNull();
    assertThat(mxBean.getName()).isEqualTo(store.getName());

    // Roll the current oplog via the management interface.
    mxBean.forceRoll();
  });
}
Example usage of org.apache.geode.cache.DiskStore in the Apache Geode project, from the class IndexCreationDeadLockJUnitTest, method testIndexCreationDeadLockForOverflowToDiskRegion.
/**
* Tests inability to create index on a region which overflows to disk *
*/
/**
 * Tests that index creation fails (rather than deadlocking) on a region whose
 * entries overflow to disk.
 */
@Test
public void testIndexCreationDeadLockForOverflowToDiskRegion() {
  this.region.destroyRegion();

  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setValueConstraint(Portfolio.class);
  // Keep at most one entry in memory so everything else overflows to disk.
  factory.setEvictionAttributes(
      EvictionAttributes.createLRUEntryAttributes(1, EvictionAction.OVERFLOW_TO_DISK));
  factory.setIndexMaintenanceSynchronous(true);

  File dir = new File("test");
  dir.mkdir(); // best-effort: the directory may already exist from a previous run
  dir.deleteOnExit();

  // The created DiskStore handle is not needed; the region references it by name.
  DiskStoreFactory dsf = region.getCache().createDiskStoreFactory();
  dsf.setDiskDirs(new File[] { dir }).create("ds1");
  factory.setDiskStoreName("ds1");

  region = CacheUtils.createRegion("portfolios", factory.create(), true);
  simulateDeadlockScenario();

  assertFalse(this.cause, this.testFailed);
  assertTrue("Index creation succeeded. For a disk region this should not have happened",
      this.exceptionInCreatingIndex);
}
Example usage of org.apache.geode.cache.DiskStore in the Apache Geode project, from the class CacheXml66DUnitTest, method testDiskStore.
/**
 * Tests that two disk stores with distinct configurations, and regions bound to
 * them (or falling back to the default disk store), round-trip through cache.xml.
 */
@Test
public void testDiskStore() throws Exception {
  CacheCreation cache = new CacheCreation();
  DiskStoreFactory dsf = cache.createDiskStoreFactory();

  // First store: compaction enabled, one set of tuning values.
  // The returned DiskStore handles are unused; regions reference the stores by name.
  File[] dirs1 = new File[] { new File("").getAbsoluteFile() };
  dsf.setAllowForceCompaction(true)
      .setAutoCompact(true)
      .setCompactionThreshold(100)
      .setMaxOplogSize(2)
      .setTimeInterval(10)
      .setWriteBufferSize(15)
      .setQueueSize(12)
      .setDiskDirsAndSizes(dirs1, new int[] { 1024 * 20 })
      .create(getUniqueName() + 1);

  // Second store: compaction disabled, every tuning value offset from the first
  // so the two configurations are distinguishable after the XML round trip.
  File[] dirs2 = new File[] { new File("").getAbsoluteFile() };
  dsf.setAllowForceCompaction(false)
      .setAutoCompact(false)
      .setCompactionThreshold(99)
      .setMaxOplogSize(1)
      .setTimeInterval(9)
      .setWriteBufferSize(14)
      .setQueueSize(11)
      .setDiskDirsAndSizes(dirs2, new int[] { 1024 * 40 })
      .create(getUniqueName() + 2);

  // Root region bound to the first disk store.
  RegionAttributesCreation attrs = new RegionAttributesCreation(cache);
  attrs.setScope(Scope.DISTRIBUTED_ACK);
  attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  attrs.setDiskStoreName(getUniqueName() + 1);
  attrs.setDiskSynchronous(true);
  RegionCreation root = (RegionCreation) cache.createRegion("root", attrs);

  // Subregion bound to the second disk store.
  attrs = new RegionAttributesCreation(cache);
  attrs.setScope(Scope.DISTRIBUTED_ACK);
  attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  attrs.setDiskStoreName(getUniqueName() + 2);
  root.createSubregion("subwithdiskstore", attrs);

  // Subregion with no explicit store name, exercising the default disk store.
  attrs = new RegionAttributesCreation(cache);
  attrs.setScope(Scope.DISTRIBUTED_ACK);
  attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  root.createSubregion("subwithdefaultdiskstore", attrs);

  testXml(cache);
}
Aggregations