use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class HAEventIdPropagationDUnitTest method putAll.
/**
 * Does a putAll and returns the eventIds generated. The eventIds are caught in the listener and
 * stored in static variables.
 */
public static Object[] putAll() {
  try {
    Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    assertNotNull(region);
    Map map = new LinkedHashMap();
    map.put(PUTALL_KEY1, "value1");
    map.put(PUTALL_KEY2, "value1");
    map.put(PUTALL_KEY3, "value1");
    map.put(PUTALL_KEY4, "value1");
    map.put(PUTALL_KEY5, "value1");
    region.putAll(map);
    Thread.sleep(5000);
    EventID[] evids = new EventID[5];
    evids[0] = putAlleventId1;
    evids[1] = putAlleventId2;
    evids[2] = putAlleventId3;
    evids[3] = putAlleventId4;
    evids[4] = putAlleventId5;
    assertNotNull(evids[0]);
    assertNotNull(evids[1]);
    assertNotNull(evids[2]);
    assertNotNull(evids[3]);
    assertNotNull(evids[4]);
    return evids;
  } catch (Exception e) {
    fail("put failed due to ", e);
  }
  return null;
}
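The listener that actually records these ids is not shown in the snippet; the static putAlleventId1..putAlleventId5 fields are populated from a CacheListener registered on the region. Below is a minimal sketch of that pattern, with an assumed class name (PutAllEventIdCapturingListener) and an assumed map field standing in for the test class's static variables; it relies on the internal EntryEventImpl to expose the EventID.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.internal.cache.EntryEventImpl;
import org.apache.geode.internal.cache.EventID;

// Hypothetical sketch: records the EventID generated for each putAll key.
// The real test would assign the ids to its static putAlleventIdN fields instead.
public class PutAllEventIdCapturingListener extends CacheListenerAdapter<Object, Object> {
  static final Map<Object, EventID> capturedIds = new ConcurrentHashMap<>();

  @Override
  public void afterCreate(EntryEvent<Object, Object> event) {
    // The EventID is only exposed on the internal event implementation.
    EventID id = ((EntryEventImpl) event).getEventId();
    capturedIds.put(event.getKey(), id);
  }
}

Such a listener would be attached to the region's attributes before region.putAll(map) runs, so the ids are available once the sleep in putAll() completes.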
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class BlockingHARegionQueueJUnitTest method testBlockingPutAndExpiry.
/**
 * Test scenario: the blocking queue capacity is 1. The first put should succeed. The second put
 * should block until the first put expires.
 * <p>
 * Fix for 40314: the capacity constraint is checked on the primary only, and expiry is not
 * applicable on the primary, so this test is marked invalid.
 */
@Test
public void testBlockingPutAndExpiry() throws Exception {
  HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
  hrqa.setBlockingQueueCapacity(1);
  hrqa.setExpiryTime(1);
  HARegionQueue hrq = this.createHARegionQueue(this.testName.getMethodName(), hrqa);
  EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
  hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
  AtomicBoolean threadStarted = new AtomicBoolean(false);
  Thread thread = new Thread(() -> {
    try {
      threadStarted.set(true);
      EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
      hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
    } catch (Exception e) {
      errorCollector.addError(e);
    }
  });
  thread.start();
  Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
  Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
}
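The snippet does not show how the test is actually disabled; the Javadoc only states that it is considered invalid after the fix for 40314. In JUnit 4, which this test class uses, such a test would typically be skipped with an @Ignore annotation. A hypothetical sketch (the annotation is not part of the original snippet):

import org.junit.Ignore;
import org.junit.Test;

public class IgnoredTestSketch {
  // Hypothetical: how a JUnit 4 test that no longer applies is usually disabled.
  @Ignore("Invalid after the fix for 40314: capacity is enforced on the primary only, and expiry does not apply there")
  @Test
  public void testBlockingPutAndExpiry() throws Exception {
    // body omitted; see the test above
  }
}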
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class BlockingHARegionQueueJUnitTest method testBlockingPutAndTake.
/**
 * Tests the effect of a put that is blocked because of the capacity constraint, and its
 * subsequent passage once a take operation frees space in the queue.
 */
@Test
public void testBlockingPutAndTake() throws Exception {
  HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
  hrqa.setBlockingQueueCapacity(1);
  HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName(), hrqa);
  // fix for 40314 - capacity constraint is checked for primary only.
  hrq.setPrimary(true);
  EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
  hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
  AtomicBoolean threadStarted = new AtomicBoolean(false);
  Thread thread = new Thread(() -> {
    try {
      threadStarted.set(true);
      EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
      hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
    } catch (InterruptedException e) {
      errorCollector.addError(e);
    }
  });
  thread.start();
  Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
  Conflatable conf = (Conflatable) hrq.take();
  assertThat(conf, notNullValue());
  Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
}
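Both blocking tests build an EventID directly from a membership id byte array, a thread id, and a sequence id. A small standalone sketch of those three components and the accessors used later on this page (the class name EventIdSketch is made up for illustration):

import org.apache.geode.internal.cache.EventID;

public class EventIdSketch {
  public static void main(String[] args) {
    byte[] memberId = new byte[] { 1 };       // membership id of the originating member
    EventID id = new EventID(memberId, 1, 2); // thread id 1, sequence id 2
    System.out.println(id.getThreadID());     // prints 1
    System.out.println(id.getSequenceID());   // prints 2
  }
}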
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class EventIdOptimizationDUnitTest method validateEventsAtReceivingClientListener.
/**
 * Validates that the eventId of the event received in the callback is contained in the eventId
 * array originally used by client1 to generate the events, and notifies client2 to proceed with
 * validation once the LAST_KEY is received.
 *
 * @param key the key of the event for an EntryEvent, or a token indicating the type of region
 *        operation for a RegionEvent
 */
public static void validateEventsAtReceivingClientListener(String key) {
  EventID eventIdAtClient2 = (EventID) assertThreadIdToSequenceIdMapHasEntryId();
  if ((eventIdAtClient2.getThreadID() == eventIdForLastKey.getThreadID())
      && (eventIdAtClient2.getSequenceID() == eventIdForLastKey.getSequenceID())) {
    LogWriterUtils.getLogWriter().info("Notifying client2 to proceed for validation");
    proceedForValidation = true;
  } else {
    boolean containsEventId = false;
    for (int i = 0; i < eventIds.length; i++) {
      if ((eventIdAtClient2.getThreadID() == eventIds[i].getThreadID())
          && (eventIdAtClient2.getSequenceID() == eventIds[i].getSequenceID())) {
        containsEventId = true;
        break;
      }
    }
    if (!containsEventId) {
      validationFailed = true;
      failureMsg.append("key = ").append(key).append(" ; eventID = ").append(eventIdAtClient2)
          .append(System.getProperty("line.separator"));
    }
  }
}
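Note that the validation compares only the thread id and the sequence id of the two EventIDs rather than relying on full EventID equality. A hypothetical helper that captures exactly the check performed in the loop above:

import org.apache.geode.internal.cache.EventID;

final class EventIdComparison {
  // Hypothetical helper: true when two EventIDs share the same thread id and
  // sequence id, which is the only condition the listener above verifies.
  static boolean sameThreadAndSequence(EventID a, EventID b) {
    return a.getThreadID() == b.getThreadID() && a.getSequenceID() == b.getSequenceID();
  }
}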
use of org.apache.geode.internal.cache.EventID in project geode by apache.
the class DestroyEntryPropagationDUnitTest method acquireConnectionsAndDestroyEntriesK1andK2.
private void acquireConnectionsAndDestroyEntriesK1andK2() {
  try {
    Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
    assertNotNull(r1);
    String poolName = r1.getAttributes().getPoolName();
    assertNotNull(poolName);
    PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
    assertNotNull(pool);
    Connection conn = pool.acquireConnection();
    final Connection conn1;
    if (conn.getServer().getPort() != PORT2) {
      // Ensure we have a server with the proper port
      conn1 = pool.acquireConnection();
    } else {
      conn1 = conn;
    }
    assertNotNull(conn1);
    assertEquals(PORT2, conn1.getServer().getPort());
    ServerRegionProxy srp = new ServerRegionProxy(Region.SEPARATOR + REGION_NAME, pool);
    srp.destroyOnForTestsOnly(conn1, "key1", null, Operation.DESTROY,
        new EventIDHolder(new EventID(new byte[] { 1 }, 100000, 1)), null);
    srp.destroyOnForTestsOnly(conn1, "key2", null, Operation.DESTROY,
        new EventIDHolder(new EventID(new byte[] { 1 }, 100000, 2)), null);
  } catch (Exception ex) {
    throw new AssertionError("Failed in acquireConnectionsAndDestroyEntriesK1andK2", ex);
  }
}