Example usage of org.apache.geode.cache.EntryEvent in the Apache Geode project:
class HARegionJUnitTest, method createHARegion.
/**
 * Creates the HARegion used by this test: a replicated, distributed-ack region named
 * "HARegionJUnitTest_region" with statistics enabled and an entry time-to-live of 2000
 * (units per {@link ExpirationAttributes}, presumably seconds — confirm against Geode docs)
 * that locally invalidates expired entries.
 *
 * @return the newly created HARegion
 */
private Region createHARegion() throws TimeoutException, CacheWriterException, GatewayException,
    CacheExistsException, RegionExistsException, IOException, ClassNotFoundException {
  AttributesFactory factory = new AttributesFactory();
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setScope(Scope.DISTRIBUTED_ACK);
  ExpirationAttributes ea = new ExpirationAttributes(2000, ExpirationAction.LOCAL_INVALIDATE);
  // Statistics must be enabled for expiration to take effect.
  factory.setStatisticsEnabled(true);
  factory.setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterInvalidate(EntryEvent event) {
      // Intentionally empty: the test only needs a listener to be registered.
    }
  });
  RegionAttributes ra = factory.create();
  Region region =
      HARegion.getInstance("HARegionJUnitTest_region", (GemFireCacheImpl) cache, null, ra);
  // TTL is applied via the mutator because expiration attributes depend on the created region.
  region.getAttributesMutator().setEntryTimeToLive(ea);
  return region;
}
Example usage of org.apache.geode.cache.EntryEvent in the Apache Geode project:
class HARegionQueueDUnitTest, method concurrentOperationsDunitTest.
/**
 * Exercises an HARegionQueue concurrently across four VMs: each VM creates a mirrored region
 * whose listener feeds entry events into the queue, then spawns worker threads performing
 * put/peek/take/batch-peek operations, and finally joins all workers.
 *
 * @param createBlockingQueue true to create a BLOCKING_HA_QUEUE, false for NON_BLOCKING
 * @param rscope the scope to use for the backing region
 */
private void concurrentOperationsDunitTest(final boolean createBlockingQueue, final Scope rscope) {
  // Create Cache and HARegionQueue in all the 4 VMs.
  CacheSerializableRunnable createRgnsAndQueues = new CacheSerializableRunnable("CreateCache, mirrored Region & HARegionQueue with a CacheListener") {
    @Override
    public void run2() throws CacheException {
      HARegionQueueDUnitTest test = new HARegionQueueDUnitTest();
      // Shorten the queue-removal thread's wait so expiry-related cleanup runs during the test.
      System.getProperties().put("QueueRemovalThreadWaitTime", "2000");
      cache = test.createCache();
      AttributesFactory factory = new AttributesFactory();
      factory.setScope(rscope);
      factory.setDataPolicy(DataPolicy.REPLICATE);
      HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
      hrqa.setExpiryTime(5);
      try {
        if (createBlockingQueue) {
          hrq = HARegionQueue.getHARegionQueueInstance("testregion1", cache, hrqa, HARegionQueue.BLOCKING_HA_QUEUE, false);
        } else {
          hrq = HARegionQueue.getHARegionQueueInstance("testregion1", cache, hrqa, HARegionQueue.NON_BLOCKING_HA_QUEUE, false);
        }
      } catch (Exception e) {
        throw new AssertionError(e);
      }
      // Bridge entry events on the region into the HARegionQueue as Conflatable objects.
      factory.addCacheListener(new CacheListenerAdapter() {
        @Override
        public void afterCreate(final EntryEvent event) {
          Conflatable conflatable = new ConflatableObject(event.getKey(), event.getNewValue(), ((EntryEventImpl) event).getEventId(), false, event.getRegion().getFullPath());
          try {
            hrq.put(conflatable);
          } catch (Exception e) {
            fail("The put operation in queue did not succeed due to exception =", e);
          }
        }

        @Override
        public void afterUpdate(final EntryEvent event) {
          // Updates are conflatable (true), unlike creates.
          Conflatable conflatable = new ConflatableObject(event.getKey(), event.getNewValue(), ((EntryEventImpl) event).getEventId(), true, event.getRegion().getFullPath());
          try {
            hrq.put(conflatable);
          } catch (Exception e) {
            fail("The put operation in queue did not succeed due to exception =", e);
          }
        }
      });
      cache.createRegion("test_region", factory.create());
    }
  };
  vm0.invoke(createRgnsAndQueues);
  vm1.invoke(createRgnsAndQueues);
  vm2.invoke(createRgnsAndQueues);
  vm3.invoke(createRgnsAndQueues);
  CacheSerializableRunnable spawnThreadsAndperformOps = new CacheSerializableRunnable("Spawn multiple threads which do various operations") {
    @Override
    public void run2() throws CacheException {
      // 4 putters, 2 peekers, 2 takers, 2 batch-peekers.
      opThreads = new Thread[4 + 2 + 2 + 2];
      for (int i = 0; i < 4; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.PUT, i), "ID=" + i + ",Op=" + RunOp.PUT);
      }
      for (int i = 4; i < 6; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.PEEK, i), "ID=" + i + ",Op=" + RunOp.PEEK);
      }
      for (int i = 6; i < 8; ++i) {
        opThreads[i] = new Thread(new RunOp(RunOp.TAKE, i), "ID=" + i + ",Op=" + RunOp.TAKE);
      }
      for (int i = 8; i < 10; ++i) {
        // BUGFIX: these threads are named BATCH_PEEK but previously ran RunOp.TAKE;
        // run the batch-peek operation the thread name advertises.
        opThreads[i] = new Thread(new RunOp(RunOp.BATCH_PEEK, i), "ID=" + i + ",Op=" + RunOp.BATCH_PEEK);
      }
      for (int i = 0; i < opThreads.length; ++i) {
        opThreads[i].start();
      }
    }
  };
  vm0.invokeAsync(spawnThreadsAndperformOps);
  vm1.invokeAsync(spawnThreadsAndperformOps);
  vm2.invokeAsync(spawnThreadsAndperformOps);
  vm3.invokeAsync(spawnThreadsAndperformOps);
  try {
    // Let the worker threads run for a while before signalling them to stop.
    Thread.sleep(2000);
  } catch (InterruptedException e1) {
    fail("Test failed as the test thread encoutered exception in sleep", e1);
  }
  // Asif : In case of blocking HARegionQueue do some extra puts so that the
  // blocking threads
  // are exited
  CacheSerializableRunnable toggleFlag = new CacheSerializableRunnable("Toggle the flag to signal end of threads") {
    @Override
    public void run2() throws CacheException {
      toCnt = false;
      if (createBlockingQueue) {
        try {
          for (int i = 0; i < 100; ++i) {
            hrq.put(new ConflatableObject("1", "1", new EventID(new byte[] { 1 }, 100, i), false, "/x"));
          }
        } catch (Exception e) {
          throw new AssertionError(e);
        }
      }
    }
  };
  vm0.invokeAsync(toggleFlag);
  vm1.invokeAsync(toggleFlag);
  vm2.invokeAsync(toggleFlag);
  vm3.invokeAsync(toggleFlag);
  CacheSerializableRunnable joinWithThreads = new CacheSerializableRunnable("Join with the threads") {
    @Override
    public void run2() throws CacheException {
      for (int i = 0; i < opThreads.length; ++i) {
        if (opThreads[i].isInterrupted()) {
          fail("Test failed because thread encountered exception");
        }
        ThreadUtils.join(opThreads[i], 30 * 1000);
      }
    }
  };
  vm0.invoke(joinWithThreads);
  vm1.invoke(joinWithThreads);
  vm2.invoke(joinWithThreads);
  vm3.invoke(joinWithThreads);
  // Restore global state so later tests see the default wait time.
  System.getProperties().remove("QueueRemovalThreadWaitTime");
}
Example usage of org.apache.geode.cache.EntryEvent in the Apache Geode project:
class CacheServerTransactionsDUnitTest, method createClientCache.
/**
 * Creates a client cache connected to the given server, with a subscription-enabled pool and a
 * local-scope region whose listener records destroy/invalidate callbacks by setting the static
 * {@code destroyed}/{@code invalidated} flags and waking waiters on the test class monitor.
 * Finishes by registering interest in the test keys.
 *
 * @param host the server host to connect the pool to
 * @param port the server port to connect the pool to
 * @throws Exception if cache or region creation fails
 */
public static void createClientCache(String host, Integer port) throws Exception {
  Properties props = new Properties();
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS, "");
  new CacheServerTransactionsDUnitTest().createCache(props);
  PoolImpl p = (PoolImpl) PoolManager.createFactory().addServer(host, port.intValue()).setSubscriptionEnabled(true).create("CacheServerTransctionDUnitTestPool2");
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.LOCAL);
  factory.setPoolName(p.getName());
  factory.setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterDestroy(EntryEvent event) {
      synchronized (CacheServerTransactionsDUnitTest.class) {
        destroyed = true;
        // Use notifyAll (not notify) so every thread waiting on this monitor is released;
        // this also makes the two callbacks consistent with each other.
        CacheServerTransactionsDUnitTest.class.notifyAll();
      }
    }

    @Override
    public void afterInvalidate(EntryEvent event) {
      synchronized (CacheServerTransactionsDUnitTest.class) {
        invalidated = true;
        CacheServerTransactionsDUnitTest.class.notifyAll();
      }
    }
  });
  Region region1 = cache.createRegion(REGION_NAME, factory.create());
  assertNotNull(region1);
  pool = p;
  registerKeys();
}
Example usage of org.apache.geode.cache.EntryEvent in the Apache Geode project:
class CacheServerTransactionsDUnitTest, method createServerCache.
/**
 * Creates a server cache with a replicated distributed-ack region whose listener records
 * destroy/invalidate callbacks by setting the static {@code destroyed}/{@code invalidated}
 * flags and waking waiters on the test class monitor, then starts a cache server on a random
 * port with the given thread limit and seeds the test entries.
 *
 * @param maxThreads the max thread count for the cache server
 * @return the port the cache server is listening on
 * @throws Exception if cache, region, or server creation fails
 */
public static Integer createServerCache(Integer maxThreads) throws Exception {
  new CacheServerTransactionsDUnitTest().createCache(new Properties());
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterDestroy(EntryEvent event) {
      synchronized (CacheServerTransactionsDUnitTest.class) {
        destroyed = true;
        // Use notifyAll (not notify) so every thread waiting on this monitor is released;
        // this also makes the two callbacks consistent with each other.
        CacheServerTransactionsDUnitTest.class.notifyAll();
      }
    }

    @Override
    public void afterInvalidate(EntryEvent event) {
      synchronized (CacheServerTransactionsDUnitTest.class) {
        invalidated = true;
        CacheServerTransactionsDUnitTest.class.notifyAll();
      }
    }
  });
  Region r1 = cache.createRegion(REGION_NAME, factory.create());
  assertNotNull(r1);
  CacheServer server1 = cache.addCacheServer();
  int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  server1.setPort(port);
  server1.setMaxThreads(maxThreads.intValue());
  server1.setNotifyBySubscription(true);
  server1.start();
  createEntries();
  // Integer.valueOf over deprecated new Integer(...) — same return type, may use the cache.
  return Integer.valueOf(server1.getPort());
}
Example usage of org.apache.geode.cache.EntryEvent in the Apache Geode project:
class EventIDVerificationDUnitTest, method createServerCache.
/**
 * Creates a server cache with a replicated distributed-ack region whose listener, for every
 * entry/region callback, compares the event's EventID against the expected static
 * {@code eventId}, records the result in {@code testEventIDResult}, sets {@code gotCallback},
 * and wakes waiters on the test class monitor. Starts a cache server on a random port.
 *
 * @return the port the cache server is listening on
 * @throws Exception if cache, region, or server creation fails
 */
public static Integer createServerCache() throws Exception {
  new EventIDVerificationDUnitTest().createCache(new Properties());
  AttributesFactory factory = new AttributesFactory();
  factory.setScope(Scope.DISTRIBUTED_ACK);
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setCacheListener(new CacheListenerAdapter() {

    // Records the event-id comparison result and wakes all waiting test threads.
    // Shared by every callback below to avoid five copies of the same signal logic.
    private void signalCallback(boolean eventIdMatches) {
      synchronized (EventIDVerificationDUnitTest.class) {
        gotCallback = true;
        testEventIDResult = eventIdMatches;
        // notifyAll (not notify) so no waiting thread is left behind.
        EventIDVerificationDUnitTest.class.notifyAll();
      }
    }

    @Override
    public void afterCreate(EntryEvent event) {
      signalCallback(((EntryEventImpl) event).getEventId().equals(eventId));
    }

    @Override
    public void afterUpdate(EntryEvent event) {
      signalCallback(((EntryEventImpl) event).getEventId().equals(eventId));
    }

    @Override
    public void afterDestroy(EntryEvent event) {
      signalCallback(((EntryEventImpl) event).getEventId().equals(eventId));
    }

    @Override
    public void afterRegionDestroy(RegionEvent event) {
      signalCallback(((RegionEventImpl) event).getEventId().equals(eventId));
    }

    @Override
    public void afterRegionClear(RegionEvent event) {
      signalCallback(((RegionEventImpl) event).getEventId().equals(eventId));
    }
  });
  RegionAttributes attrs = factory.create();
  cache.createRegion(REGION_NAME, attrs);
  int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
  CacheServer server1 = cache.addCacheServer();
  server1.setPort(port);
  server1.setNotifyBySubscription(true);
  server1.start();
  // Integer.valueOf over deprecated new Integer(...) — same return type, may use the cache.
  return Integer.valueOf(server1.getPort());
}
Aggregations