Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class WANTestBase, method createSenderForValidations.
public static void createSenderForValidations(String dsName, int remoteDsId, boolean isParallel,
    Integer alertThreshold, boolean isConflation, boolean isPersistent,
    List<GatewayEventFilter> eventFilters, List<GatewayTransportFilter> transportFilters,
    boolean isManualStart, boolean isDiskSync) {
  IgnoredException exp1 =
      IgnoredException.addIgnoredException(RegionDestroyedException.class.getName());
  try {
    File persistentDirectory =
        new File(dsName + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
    persistentDirectory.mkdir();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    File[] dirs1 = new File[] { persistentDirectory };
    if (isParallel) {
      GatewaySenderFactory gateway = cache.createGatewaySenderFactory();
      gateway.setParallel(true);
      gateway.setAlertThreshold(alertThreshold);
      ((InternalGatewaySenderFactory) gateway).setLocatorDiscoveryCallback(new MyLocatorCallback());
      if (eventFilters != null) {
        for (GatewayEventFilter filter : eventFilters) {
          gateway.addGatewayEventFilter(filter);
        }
      }
      if (transportFilters != null) {
        for (GatewayTransportFilter filter : transportFilters) {
          gateway.addGatewayTransportFilter(filter);
        }
      }
      if (isPersistent) {
        gateway.setPersistenceEnabled(true);
        gateway.setDiskStoreName(dsf.setDiskDirs(dirs1).create(dsName + "_Parallel").getName());
      } else {
        DiskStore store = dsf.setDiskDirs(dirs1).create(dsName + "_Parallel");
        gateway.setDiskStoreName(store.getName());
      }
      gateway.setDiskSynchronous(isDiskSync);
      gateway.setBatchConflationEnabled(isConflation);
      gateway.setManualStart(isManualStart);
      // set dispatcher threads
      gateway.setDispatcherThreads(numDispatcherThreadsForTheRun);
      gateway.create(dsName, remoteDsId);
    } else {
      GatewaySenderFactory gateway = cache.createGatewaySenderFactory();
      gateway.setAlertThreshold(alertThreshold);
      gateway.setManualStart(isManualStart);
      // set dispatcher threads
      gateway.setDispatcherThreads(numDispatcherThreadsForTheRun);
      ((InternalGatewaySenderFactory) gateway).setLocatorDiscoveryCallback(new MyLocatorCallback());
      if (eventFilters != null) {
        for (GatewayEventFilter filter : eventFilters) {
          gateway.addGatewayEventFilter(filter);
        }
      }
      if (transportFilters != null) {
        for (GatewayTransportFilter filter : transportFilters) {
          gateway.addGatewayTransportFilter(filter);
        }
      }
      gateway.setBatchConflationEnabled(isConflation);
      if (isPersistent) {
        gateway.setPersistenceEnabled(true);
        gateway.setDiskStoreName(dsf.setDiskDirs(dirs1).create(dsName + "_Serial").getName());
      } else {
        DiskStore store = dsf.setDiskDirs(dirs1).create(dsName + "_Serial");
        gateway.setDiskStoreName(store.getName());
      }
      gateway.setDiskSynchronous(isDiskSync);
      gateway.create(dsName, remoteDsId);
    }
  } finally {
    exp1.remove();
  }
}
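Both branches follow the same DiskStore wiring: obtain a DiskStoreFactory from the cache, point it at a directory, create a named store, and hand that store's name to the GatewaySenderFactory. Below is a minimal standalone sketch of that pattern, not taken from the test; the names "exampleStore" and "exampleSender" and the remote distributed-system id are illustrative, and manual start is enabled so the sender does not attempt to connect anywhere.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.wan.GatewaySenderFactory;

public class DiskStoreSenderSketch {
  public static void main(String[] args) {
    // A loner cache is enough to demonstrate the DiskStore wiring.
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    File diskDir = new File("exampleSender_disk");
    diskDir.mkdir();
    // Create a named DiskStore backed by the directory, then hand its name to the sender factory.
    DiskStore store = cache.createDiskStoreFactory()
        .setDiskDirs(new File[] { diskDir })
        .create("exampleStore");
    GatewaySenderFactory senderFactory = cache.createGatewaySenderFactory();
    senderFactory.setPersistenceEnabled(true);
    senderFactory.setDiskStoreName(store.getName());
    senderFactory.setManualStart(true); // avoid connection attempts in this standalone sketch
    senderFactory.create("exampleSender", 2); // 2 is an illustrative remote distributed-system id
    cache.close();
  }
}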
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class ParallelGatewaySenderQueueOverflowDUnitTest, method test_ValidateParallelGatewaySenderQueueAttributes_1.
/**
 * Test to validate the ParallelGatewaySenderQueue diskSynchronous attribute when persistence of
 * the sender is enabled.
 */
@Ignore("TODO: test is disabled")
@Test
public void test_ValidateParallelGatewaySenderQueueAttributes_1() {
  Integer localLocPort = (Integer) vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId(1));
  Integer remoteLocPort =
      (Integer) vm1.invoke(() -> WANTestBase.createFirstRemoteLocator(2, localLocPort));
  WANTestBase test = new WANTestBase();
  Properties props = test.getDistributedSystemProperties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "localhost[" + localLocPort + "]");
  InternalDistributedSystem ds = test.getSystem(props);
  cache = CacheFactory.create(ds);
  File directory =
      new File("TKSender" + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
  directory.mkdir();
  File[] dirs1 = new File[] { directory };
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(dirs1);
  DiskStore diskStore = dsf.create("FORNY");
  GatewaySenderFactory fact = cache.createGatewaySenderFactory();
  // set parallel to true
  fact.setParallel(true);
  fact.setBatchConflationEnabled(true);
  fact.setBatchSize(200);
  fact.setBatchTimeInterval(300);
  // enable the persistence
  fact.setPersistenceEnabled(true);
  fact.setDiskSynchronous(true);
  fact.setDiskStoreName("FORNY");
  fact.setMaximumQueueMemory(200);
  fact.setAlertThreshold(1200);
  GatewayEventFilter myEventFilter1 = new MyGatewayEventFilter1();
  fact.addGatewayEventFilter(myEventFilter1);
  GatewayTransportFilter myStreamFilter1 = new MyGatewayTransportFilter1();
  fact.addGatewayTransportFilter(myStreamFilter1);
  GatewayTransportFilter myStreamFilter2 = new MyGatewayTransportFilter2();
  fact.addGatewayTransportFilter(myStreamFilter2);
  final IgnoredException exTKSender = IgnoredException.addIgnoredException("Could not connect");
  try {
    GatewaySender sender1 = fact.create("TKSender", 2);
    AttributesFactory factory = new AttributesFactory();
    factory.addGatewaySenderId(sender1.getId());
    factory.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    Region region =
        cache.createRegionFactory(factory.create()).create("test_ValidateGatewaySenderAttributes");
    Set<GatewaySender> senders = cache.getGatewaySenders();
    assertEquals(senders.size(), 1);
    GatewaySender gatewaySender = senders.iterator().next();
    Set<RegionQueue> regionQueues = ((AbstractGatewaySender) gatewaySender).getQueues();
    assertEquals(regionQueues.size(), 1);
    RegionQueue regionQueue = regionQueues.iterator().next();
    assertEquals(true, regionQueue.getRegion().getAttributes().isDiskSynchronous());
  } finally {
    exTKSender.remove();
  }
}
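For comparison, here is a small standalone sketch (not part of the test) of the attribute being asserted above: a persistent region wired to a named DiskStore with setDiskSynchronous(true) reports isDiskSynchronous() as true. All names in the sketch are illustrative.

import java.io.File;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskSynchronousSketch {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().set("mcast-port", "0").create();
    File dir = new File("sketch_disk");
    dir.mkdir();
    // Register a named DiskStore backed by the directory.
    cache.createDiskStoreFactory().setDiskDirs(new File[] { dir }).create("sketchStore");
    // A persistent region wired to the named store, with synchronous disk writes.
    Region<String, String> region = cache
        .<String, String>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
        .setDiskStoreName("sketchStore")
        .setDiskSynchronous(true)
        .create("sketchRegion");
    System.out.println(region.getAttributes().isDiskSynchronous()); // prints true
    cache.close();
  }
}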
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DescribeDiskStoreFunctionJUnitTest, method testIsAsyncEventQueueUsingDiskStore.
@Test
public void testIsAsyncEventQueueUsingDiskStore() {
  final String diskStoreName = "testDiskStore";
  final AsyncEventQueue mockQueue = mockContext.mock(AsyncEventQueue.class, "AsyncEventQueue");
  final DiskStore mockDiskStore = mockContext.mock(DiskStore.class, "DiskStore");
  mockContext.checking(new Expectations() {
    {
      oneOf(mockQueue).getDiskStoreName();
      will(returnValue(diskStoreName));
      oneOf(mockQueue).isPersistent();
      will(returnValue(true));
      oneOf(mockDiskStore).getName();
      will(returnValue(diskStoreName));
    }
  });
  final DescribeDiskStoreFunction function = createDescribeDiskStoreFunction(null);
  assertTrue(function.isUsingDiskStore(mockQueue, mockDiskStore));
}
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DescribeDiskStoreFunctionJUnitTest, method testIsAsyncEventQueueUsingDiskStoreWhenQueueIsNotPersistent.
@Test
public void testIsAsyncEventQueueUsingDiskStoreWhenQueueIsNotPersistent() {
  final String diskStoreName = "testDiskStore";
  final AsyncEventQueue mockQueue = mockContext.mock(AsyncEventQueue.class, "AsyncEventQueue");
  final DiskStore mockDiskStore = mockContext.mock(DiskStore.class, "DiskStore");
  mockContext.checking(new Expectations() {
    {
      oneOf(mockQueue).isPersistent();
      will(returnValue(false));
    }
  });
  final DescribeDiskStoreFunction function = createDescribeDiskStoreFunction(null);
  assertFalse(function.isUsingDiskStore(mockQueue, mockDiskStore));
}
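Taken together, the two tests suggest that isUsingDiskStore for an AsyncEventQueue reduces to a persistence check followed by a disk store name comparison. The sketch below is inferred from the mock expectations, not copied from the function's source, which may handle additional cases such as a default disk store name.

import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.asyncqueue.AsyncEventQueue;

final class IsUsingDiskStoreSketch {
  static boolean isUsingDiskStore(final AsyncEventQueue queue, final DiskStore diskStore) {
    // Non-persistent queues short-circuit to false, which is why the second test stubs only
    // isPersistent(); otherwise compare the queue's configured name to the store's name.
    return queue.isPersistent() && diskStore.getName().equals(queue.getDiskStoreName());
  }
}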
Use of org.apache.geode.cache.DiskStore in project geode by apache.
The class DescribeDiskStoreFunctionJUnitTest, method testExecute.
@Test
public void testExecute() throws Throwable {
  final UUID diskStoreId = UUID.randomUUID();
  final String diskStoreName = "mockDiskStore";
  final String memberId = "mockMemberId";
  final String memberName = "mockMemberName";
  final InternalCache mockCache = mockContext.mock(InternalCache.class, "Cache");
  final InternalDistributedMember mockMember =
      mockContext.mock(InternalDistributedMember.class, "DistributedMember");
  final DiskStore mockDiskStore = createMockDiskStore(diskStoreId, diskStoreName, true, false, 75,
      8192L, 500, 120L, 10240,
      createFileArray("/export/disk/backup", "/export/disk/overflow", "/export/disk/persistence"),
      createIntArray(10240, 204800, 4096000), 50, 75);
  final FunctionContext mockFunctionContext =
      mockContext.mock(FunctionContext.class, "testExecute$FunctionContext");
  final TestResultSender testResultSender = new TestResultSender();
  mockContext.checking(new Expectations() {
    {
      oneOf(mockCache).getMyId();
      will(returnValue(mockMember));
      oneOf(mockCache).findDiskStore(diskStoreName);
      will(returnValue(mockDiskStore));
      oneOf(mockCache).getPdxPersistent();
      will(returnValue(true));
      oneOf(mockCache).getPdxDiskStore();
      will(returnValue("memoryStore"));
      oneOf(mockMember).getId();
      will(returnValue(memberId));
      oneOf(mockMember).getName();
      will(returnValue(memberName));
      oneOf(mockFunctionContext).getArguments();
      will(returnValue(diskStoreName));
      oneOf(mockFunctionContext).getResultSender();
      will(returnValue(testResultSender));
    }
  });
  final Set<DiskStoreDetails.RegionDetails> expectedRegionDetails =
      setupRegionsForTestExecute(mockCache, diskStoreName);
  final Set<DiskStoreDetails.GatewayDetails> expectedGatewayDetails =
      setupGatewaysForTestExecute(mockCache, diskStoreName);
  final Set<DiskStoreDetails.CacheServerDetails> expectedCacheServerDetails =
      setupCacheServersForTestExecute(mockCache, diskStoreName);
  final Set<DiskStoreDetails.AsyncEventQueueDetails> expectedAsyncEventQueueDetails =
      setupAsyncEventQueuesForTestExecute(mockCache, diskStoreName);
  final DescribeDiskStoreFunction function = createDescribeDiskStoreFunction(mockCache);
  function.execute(mockFunctionContext);
  final List<?> results = testResultSender.getResults();
  assertNotNull(results);
  assertEquals(1, results.size());
  final DiskStoreDetails diskStoreDetails = (DiskStoreDetails) results.get(0);
  assertNotNull(diskStoreDetails);
  assertEquals(diskStoreId, diskStoreDetails.getId());
  assertEquals(diskStoreName, diskStoreDetails.getName());
  assertEquals(memberId, diskStoreDetails.getMemberId());
  assertEquals(memberName, diskStoreDetails.getMemberName());
  assertTrue(diskStoreDetails.getAllowForceCompaction());
  assertFalse(diskStoreDetails.getAutoCompact());
  assertEquals(75, diskStoreDetails.getCompactionThreshold().intValue());
  assertEquals(8192L, diskStoreDetails.getMaxOplogSize().longValue());
  assertFalse(diskStoreDetails.isPdxSerializationMetaDataStored());
  assertEquals(500, diskStoreDetails.getQueueSize().intValue());
  assertEquals(120L, diskStoreDetails.getTimeInterval().longValue());
  assertEquals(10240, diskStoreDetails.getWriteBufferSize().intValue());
  assertEquals(50.0f, diskStoreDetails.getDiskUsageWarningPercentage().floatValue(), 0.0f);
  assertEquals(75.0f, diskStoreDetails.getDiskUsageCriticalPercentage().floatValue(), 0.0f);
  final List<String> expectedDiskDirs = Arrays.asList(
      new File("/export/disk/backup").getAbsolutePath(),
      new File("/export/disk/overflow").getAbsolutePath(),
      new File("/export/disk/persistence").getAbsolutePath());
  final List<Integer> expectedDiskDirSizes = Arrays.asList(10240, 204800, 4096000);
  int count = 0;
  for (final DiskStoreDetails.DiskDirDetails diskDirDetails : diskStoreDetails) {
    assertTrue(expectedDiskDirs.contains(diskDirDetails.getAbsolutePath()));
    assertTrue(expectedDiskDirSizes.contains(diskDirDetails.getSize()));
    count++;
  }
  assertEquals(expectedDiskDirs.size(), count);
  assertRegionDetails(expectedRegionDetails, diskStoreDetails);
  assertCacheServerDetails(expectedCacheServerDetails, diskStoreDetails);
  assertGatewayDetails(expectedGatewayDetails, diskStoreDetails);
  assertAsyncEventQueueDetails(expectedAsyncEventQueueDetails, diskStoreDetails);
}
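The helpers createMockDiskStore, createFileArray, createIntArray, and the setup*/assert* methods belong to the test class and are not shown in this excerpt. As a rough idea of the two simplest ones, here is a hypothetical sketch; the actual implementations in DescribeDiskStoreFunctionJUnitTest may differ.

import java.io.File;

final class HelperSketches {
  // Hypothetical: wraps each path string in a File.
  static File[] createFileArray(final String... locations) {
    final File[] files = new File[locations.length];
    for (int i = 0; i < locations.length; i++) {
      files[i] = new File(locations[i]);
    }
    return files;
  }

  // Hypothetical: the varargs already arrive as an int[], so pass them through.
  static int[] createIntArray(final int... sizes) {
    return sizes;
  }
}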