
Example 11 with EventID

use of org.apache.geode.internal.cache.EventID in project geode by apache.

the class HAEventIdPropagationDUnitTest method putAll.

/**
   * Performs a putAll and returns the EventIDs generated. Each EventID is captured in the
   * listener and stored in a static variable.
   */
public static Object[] putAll() {
    try {
        Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
        assertNotNull(region);
        Map map = new LinkedHashMap();
        map.put(PUTALL_KEY1, "value1");
        map.put(PUTALL_KEY2, "value1");
        map.put(PUTALL_KEY3, "value1");
        map.put(PUTALL_KEY4, "value1");
        map.put(PUTALL_KEY5, "value1");
        region.putAll(map);
        // give the listener callbacks time to record the EventIDs in the static fields
        Thread.sleep(5000);
        EventID[] evids = new EventID[5];
        evids[0] = putAlleventId1;
        evids[1] = putAlleventId2;
        evids[2] = putAlleventId3;
        evids[3] = putAlleventId4;
        evids[4] = putAlleventId5;
        assertNotNull(evids[0]);
        assertNotNull(evids[1]);
        assertNotNull(evids[2]);
        assertNotNull(evids[3]);
        assertNotNull(evids[4]);
        return evids;
    } catch (Exception e) {
        fail("put failed due to ", e);
    }
    return null;
}
Also used : Region(org.apache.geode.cache.Region) EventID(org.apache.geode.internal.cache.EventID) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) CacheException(org.apache.geode.cache.CacheException)
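
The static putAlleventIdN fields read above are populated by a CacheListener registered on the region. Below is a minimal sketch of such a listener, assuming the event can be cast to the internal EntryEventImpl to read its EventID (as Geode's DUnit tests commonly do); the class name, key literals, and the two fields shown are illustrative.

import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.internal.cache.EntryEventImpl;
import org.apache.geode.internal.cache.EventID;

public class EventIdCapturingListener extends CacheListenerAdapter<Object, Object> {

    // Statics the putAll() helper above reads back (only two of the five are shown).
    static volatile EventID putAlleventId1;
    static volatile EventID putAlleventId2;

    @Override
    public void afterCreate(EntryEvent<Object, Object> event) {
        // The internal EntryEventImpl carries the EventID generated for each putAll entry.
        EventID id = ((EntryEventImpl) event).getEventId();
        if ("PUTALL_KEY1".equals(event.getKey())) {        // hypothetical key literal
            putAlleventId1 = id;
        } else if ("PUTALL_KEY2".equals(event.getKey())) { // hypothetical key literal
            putAlleventId2 = id;
        }
        // ... remaining keys handled the same way
    }
}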

Example 12 with EventID

use of org.apache.geode.internal.cache.EventID in project geode by apache.

the class BlockingHARegionQueueJUnitTest method testBlockingPutAndExpiry.

/**
   * Test scenario: blocking queue capacity is 1. The first put should be successful. The second
   * put should block until the first put expires.
   * <p>
   * Fix for 40314: the capacity constraint is checked on the primary only, and expiry does not
   * apply on the primary, so this test is marked as invalid.
   */
@Test
public void testBlockingPutAndExpiry() throws Exception {
    HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
    hrqa.setBlockingQueueCapacity(1);
    hrqa.setExpiryTime(1);
    HARegionQueue hrq = this.createHARegionQueue(this.testName.getMethodName(), hrqa);
    EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
    hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
    AtomicBoolean threadStarted = new AtomicBoolean(false);
    Thread thread = new Thread(() -> {
        try {
            threadStarted.set(true);
            EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
            hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
        } catch (Exception e) {
            errorCollector.addError(e);
        }
    });
    thread.start();
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) EventID(org.apache.geode.internal.cache.EventID) ClientSubscriptionTest(org.apache.geode.test.junit.categories.ClientSubscriptionTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
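
For reference, the identity of these events combines a membership id (the byte array), a thread id, and a sequence id. The sketch below uses only the EventID constructor and accessors already shown in these examples to illustrate how two puts from the same thread are distinguished; the class name is illustrative.

import org.apache.geode.internal.cache.EventID;

public class EventIdSketch {
    public static void main(String[] args) {
        byte[] memberId = new byte[] { 1 };           // stand-in for a real membership id
        EventID first = new EventID(memberId, 1, 1);  // thread 1, sequence 1
        EventID second = new EventID(memberId, 1, 2); // same thread, next sequence

        // Same logical thread of origin...
        System.out.println(first.getThreadID() == second.getThreadID());     // true
        // ...but a different sequence id, so the queue treats them as distinct puts.
        System.out.println(first.getSequenceID() == second.getSequenceID()); // false
    }
}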

Example 13 with EventID

use of org.apache.geode.internal.cache.EventID in project geode by apache.

the class BlockingHARegionQueueJUnitTest method testBlockingPutAndTake.

/**
   * Tests that a put blocked by the capacity constraint is able to proceed once a take operation
   * frees space in the queue.
   */
@Test
public void testBlockingPutAndTake() throws Exception {
    HARegionQueueAttributes hrqa = new HARegionQueueAttributes();
    hrqa.setBlockingQueueCapacity(1);
    HARegionQueue hrq = createHARegionQueue(this.testName.getMethodName(), hrqa);
    // fix for 40314 - capacity constraint is checked for primary only.
    hrq.setPrimary(true);
    EventID id1 = new EventID(new byte[] { 1 }, 1, 1);
    hrq.put(new ConflatableObject("key1", "val1", id1, false, "testing"));
    AtomicBoolean threadStarted = new AtomicBoolean(false);
    Thread thread = new Thread(() -> {
        try {
            threadStarted.set(true);
            EventID id2 = new EventID(new byte[] { 1 }, 1, 2);
            hrq.put(new ConflatableObject("key1", "val2", id2, false, "testing"));
        } catch (InterruptedException e) {
            errorCollector.addError(e);
        }
    });
    thread.start();
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> threadStarted.get());
    Conflatable conf = (Conflatable) hrq.take();
    assertThat(conf, notNullValue());
    Awaitility.await().atMost(1, TimeUnit.MINUTES).until(() -> !thread.isAlive());
}
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) EventID(org.apache.geode.internal.cache.EventID) Conflatable(org.apache.geode.internal.cache.Conflatable) ClientSubscriptionTest(org.apache.geode.test.junit.categories.ClientSubscriptionTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
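
The capacity-1 blocking behavior exercised here can be pictured with a plain java.util.concurrent queue. The sketch below is not Geode API, just an analogy for the put-blocks-until-take pattern the test asserts.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

public class BlockingPutAndTakeSketch {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> queue = new ArrayBlockingQueue<>(1);
        queue.put("val1"); // fills the single slot

        Thread producer = new Thread(() -> {
            try {
                queue.put("val2"); // blocks until the consumer takes "val1"
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        producer.start();

        String taken = queue.take(); // frees the slot; the blocked put can proceed
        System.out.println("took " + taken); // prints "took val1"
        producer.join();
    }
}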

Example 14 with EventID

use of org.apache.geode.internal.cache.EventID in project geode by apache.

the class EventIdOptimizationDUnitTest method validateEventsAtReceivingClientListener.

/**
   * Validates that the eventId of the event received in the callback is contained in the eventId
   * array originally used by client1 to generate the events, and notifies client2 to proceed with
   * validation once the LAST_KEY is received.
   * 
   * @param key the key of the event for an EntryEvent, or a token indicating the type of region
   *        operation for a RegionEvent
   */
public static void validateEventsAtReceivingClientListener(String key) {
    EventID eventIdAtClient2 = (EventID) assertThreadIdToSequenceIdMapHasEntryId();
    if ((eventIdAtClient2.getThreadID() == eventIdForLastKey.getThreadID()) && (eventIdAtClient2.getSequenceID() == eventIdForLastKey.getSequenceID())) {
        LogWriterUtils.getLogWriter().info("Notifying client2 to proceed for validation");
        proceedForValidation = true;
    } else {
        boolean containsEventId = false;
        for (int i = 0; i < eventIds.length; i++) {
            if ((eventIdAtClient2.getThreadID() == eventIds[i].getThreadID()) && (eventIdAtClient2.getSequenceID() == eventIds[i].getSequenceID())) {
                containsEventId = true;
                break;
            }
        }
        if (!containsEventId) {
            validationFailed = true;
            failureMsg.append("key = ").append(key).append(" ; eventID = ").append(eventIdAtClient2).append(System.getProperty("line.separator"));
        }
    }
}
Also used : EventID(org.apache.geode.internal.cache.EventID)
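
The containment check in the loop above compares only the thread id and the sequence id of each EventID. A hypothetical helper expressing the same check is sketched below; the class and method names are illustrative.

import org.apache.geode.internal.cache.EventID;

final class EventIdChecks {
    // True when the candidate's thread id and sequence id match some entry in the array
    // (the membership id is not compared, mirroring the validation above).
    static boolean containsEventId(EventID[] ids, EventID candidate) {
        for (EventID id : ids) {
            if (id.getThreadID() == candidate.getThreadID()
                && id.getSequenceID() == candidate.getSequenceID()) {
                return true;
            }
        }
        return false;
    }
}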

Example 15 with EventID

use of org.apache.geode.internal.cache.EventID in project geode by apache.

the class DestroyEntryPropagationDUnitTest method acquireConnectionsAndDestroyEntriesK1andK2.

private void acquireConnectionsAndDestroyEntriesK1andK2() {
    try {
        Region r1 = cache.getRegion(Region.SEPARATOR + REGION_NAME);
        assertNotNull(r1);
        String poolName = r1.getAttributes().getPoolName();
        assertNotNull(poolName);
        PoolImpl pool = (PoolImpl) PoolManager.find(poolName);
        assertNotNull(pool);
        Connection conn = pool.acquireConnection();
        final Connection conn1;
        if (conn.getServer().getPort() != PORT2) {
            // Ensure we have a server with the proper port
            conn1 = pool.acquireConnection();
        } else {
            conn1 = conn;
        }
        assertNotNull(conn1);
        assertEquals(PORT2, conn1.getServer().getPort());
        ServerRegionProxy srp = new ServerRegionProxy(Region.SEPARATOR + REGION_NAME, pool);
        srp.destroyOnForTestsOnly(conn1, "key1", null, Operation.DESTROY, new EventIDHolder(new EventID(new byte[] { 1 }, 100000, 1)), null);
        srp.destroyOnForTestsOnly(conn1, "key2", null, Operation.DESTROY, new EventIDHolder(new EventID(new byte[] { 1 }, 100000, 2)), null);
    } catch (Exception ex) {
        throw new AssertionError("Failed in acquireConnectionsAndDestroyEntriesK1andK2", ex);
    }
}
Also used : ServerRegionProxy(org.apache.geode.cache.client.internal.ServerRegionProxy) EventIDHolder(org.apache.geode.internal.cache.EventIDHolder) Connection(org.apache.geode.cache.client.internal.Connection) Region(org.apache.geode.cache.Region) EventID(org.apache.geode.internal.cache.EventID) PoolImpl(org.apache.geode.cache.client.internal.PoolImpl) CacheException(org.apache.geode.cache.CacheException) CacheWriterException(org.apache.geode.cache.CacheWriterException)
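
destroyOnForTestsOnly drives each destroy through a specific connection with a hand-built EventID, which is an internal test-only hook. For contrast, a regular client destroys entries through the public Region API and lets Geode generate the EventID; a minimal sketch, assuming a String-keyed region:

import org.apache.geode.cache.Region;

public class DestroyViaPublicApi {
    static void destroyKey1AndKey2(Region<String, String> region) {
        // The pool picks the server connection and the EventID is generated internally.
        region.destroy("key1"); // throws EntryNotFoundException if the key is absent
        region.destroy("key2");
    }
}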

Aggregations

EventID (org.apache.geode.internal.cache.EventID): 147
Test (org.junit.Test): 66
ClientSubscriptionTest (org.apache.geode.test.junit.categories.ClientSubscriptionTest): 60
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 58
IOException (java.io.IOException): 41
Map (java.util.Map): 33
CacheException (org.apache.geode.cache.CacheException): 31
Conflatable (org.apache.geode.internal.cache.Conflatable): 29
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 23
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 20
HashMap (java.util.HashMap): 16
Part (org.apache.geode.internal.cache.tier.sockets.Part): 16
ByteBuffer (java.nio.ByteBuffer): 14
Iterator (java.util.Iterator): 14
List (java.util.List): 14
LinkedHashMap (java.util.LinkedHashMap): 13
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 13
EntryEventImpl (org.apache.geode.internal.cache.EntryEventImpl): 13
AuthorizeRequest (org.apache.geode.internal.security.AuthorizeRequest): 13
ConcurrentMap (java.util.concurrent.ConcurrentMap): 12