Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class ParallelGatewaySenderImpl, the method setModifiedEventId:
@Override
protected void setModifiedEventId(EntryEventImpl clonedEvent) {
  int bucketId = -1;
  // merged from 42004
  if (clonedEvent.getRegion() instanceof DistributedRegion) {
    bucketId = PartitionedRegionHelper.getHashKey(clonedEvent.getKey(),
        getMaxParallelismForReplicatedRegion());
  } else {
    bucketId = PartitionedRegionHelper.getHashKey((EntryOperation) clonedEvent);
  }
  EventID originalEventId = clonedEvent.getEventId();
  long originatingThreadId = ThreadIdentifier.getRealThreadID(originalEventId.getThreadID());
  long newThreadId = ThreadIdentifier.createFakeThreadIDForParallelGSPrimaryBucket(bucketId,
      originatingThreadId, getEventIdIndex());
  // In case of parallel as all events go through primary buckets
  // we don't need to generate different threadId for secondary buckets
  // as they will be rejected if seen at PR level itself
  EventID newEventId = new EventID(originalEventId.getMembershipID(), newThreadId,
      originalEventId.getSequenceID(), bucketId);
  if (logger.isDebugEnabled()) {
    logger.debug(
        "{}: Generated event id for event with key={}, bucketId={}, original event id={}, threadId={}, new event id={}, newThreadId={}",
        this, clonedEvent.getKey(), bucketId, originalEventId, originatingThreadId, newEventId,
        newThreadId);
  }
  clonedEvent.setEventId(newEventId);
}
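Only the thread id portion of the event id is rewritten here; the membership id and sequence id are carried over unchanged, and the fake thread id ties the event to the primary bucket it is routed through. As a rough illustration (a hypothetical helper, not part of Geode, and it assumes getMembershipID() returns the raw byte[] form), the following relation would hold between the original and the modified event id:

import java.util.Arrays;
import org.apache.geode.internal.cache.EventID;

class ModifiedEventIdCheck {
  static void check(EventID original, EventID modified) {
    // Membership id and sequence id are carried over unchanged from the original event.
    assert Arrays.equals(original.getMembershipID(), modified.getMembershipID());
    assert original.getSequenceID() == modified.getSequenceID();
    // Only the thread id changes: it is replaced by a fake, bucket-specific thread id so
    // that all events funneled through the same primary bucket share one ordering key.
    assert original.getThreadID() != modified.getThreadID();
  }
}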
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class ElidedPutAllDUnitTest, the method testElidedPutAllOnPR:
/**
* bug #47425 - elided putAll event causes PutAllPartialResultException
*/
@Test
public void testElidedPutAllOnPR() throws Exception {
  final String regionName = getUniqueName() + "Region";
  final String key = "key-1";
  Cache cache = getCache();
  PartitionedRegion region =
      (PartitionedRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
  region.put(key, "value-1");
  region.put(key, "value-2");
  Entry<?, ?> entry = region.getEntry(key);
  assertTrue("expected entry to be in this vm", entry != null);
  VM vm1 = Host.getHost(0).getVM(1);
  vm1.invoke(new SerializableRunnable("perform conflicting update") {
    @Override
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region = (PartitionedRegion) cache
          .createRegionFactory(RegionShortcut.PARTITION).create(regionName);
      try {
        Entry<?, ?> entry = region.getEntry(key);
        assertTrue(entry instanceof EntrySnapshot);
        RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
        final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
        tag.setEntryVersion(tag.getEntryVersion() - 1);
        tag.setRegionVersion(1);
        Map<String, String> map = new HashMap<String, String>();
        map.put(key, "value-3");
        DistributedPutAllOperation dpao = region.newPutAllOperation(map, null);
        EntryEventImpl event = EntryEventImpl.create(region, Operation.PUTALL_CREATE, null, null,
            null, true, (DistributedMember) tag.getMemberID());
        event.setOldValue("value-1");
        event.setVersionTag(tag);
        event.setEventId(new EventID(cache.getDistributedSystem()));
        event.setKeyInfo(((PartitionedRegion) region).getKeyInfo(key));
        dpao.addEntry(event, event.getKeyInfo().getBucketId());
        // getLogWriter().info("dpao data = " + dpao.getPutAllEntryData()[0]);
        VersionedObjectList successfulPuts = new VersionedObjectList(1, true, true);
        successfulPuts.addKeyAndVersion(key, tag);
        try {
          region.postPutAllSend(dpao, successfulPuts);
        } catch (ConcurrentCacheModificationException e) {
          Assert.fail("Should not have received an exception for an elided operation", e);
        } finally {
          event.release();
          dpao.getBaseEvent().release();
          dpao.freeOffHeapResources();
        }
      } catch (Exception e) {
        Assert.fail("caught unexpected exception", e);
      }
    }
  });
  entry = region.getEntry(key);
  assertTrue("expected value-2: " + entry.getValue(), entry.getValue().equals("value-2"));
  RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
  final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
  assertTrue(tag.getEntryVersion() == 2);
}
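What the test manufactures is a concurrency conflict that the receiving member resolves by eliding the event: the entry is already at a newer version, so the incoming putAll entry is dropped rather than applied, and the fix for bug #47425 is that this no longer surfaces as a PutAllPartialResultException. A stripped-down view of the version comparison at play (hypothetical values, not Geode's actual conflict-check code):

// The cached entry sits at entryVersion 2 after the two region.put calls above, while the
// crafted tag was decremented to entryVersion 1 before postPutAllSend.
int cachedEntryVersion = 2;
int incomingEntryVersion = 1;
// An incoming change that is not newer than what is already cached is elided (dropped);
// the test then verifies that "value-2" and entryVersion 2 are still in place.
boolean elided = incomingEntryVersion <= cachedEntryVersion;
assert elided;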
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class PutAllDUnitTest, the method putAll:
/**
 * Does a putAll and returns the event ids generated. Each event id is caught in the listener
 * and stored in a static variable.
 */
public static Object[] putAll() {
  Region region = cache.getRegion(Region.SEPARATOR + REGION_NAME);
  assertNotNull(region);
  try {
    Map map = new LinkedHashMap();
    map.put(PUTALL_KEY1, PUTALL_VALUE1);
    map.put(PUTALL_KEY2, PUTALL_VALUE2);
    map.put(PUTALL_KEY3, PUTALL_VALUE3);
    map.put(PUTALL_KEY4, PUTALL_VALUE4);
    map.put(PUTALL_KEY5, PUTALL_VALUE5);
    region.putAll(map, "putAllCallbackArg");
    EventID[] evids = new EventID[5];
    evids[0] = putAlleventId1;
    evids[1] = putAlleventId2;
    evids[2] = putAlleventId3;
    evids[3] = putAlleventId4;
    evids[4] = putAlleventId5;
    assertNotNull(evids[0]);
    assertNotNull(evids[1]);
    assertNotNull(evids[2]);
    assertNotNull(evids[3]);
    assertNotNull(evids[4]);
    return evids;
  } catch (Exception e) {
    fail("put failed due to " + e);
  }
  return null;
}
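The static fields putAlleventId1 through putAlleventId5 are populated by a cache listener while the putAll is being applied. A minimal sketch of such a listener (an assumption about the wiring, not the test's actual listener) that records the EventID of each callback keyed by the entry it belongs to:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;
import org.apache.geode.internal.cache.EntryEventImpl;
import org.apache.geode.internal.cache.EventID;

class EventIdCapturingListener extends CacheListenerAdapter<Object, Object> {
  // Hypothetical holder; the test itself uses one static field per key (putAlleventId1..5).
  static final Map<Object, EventID> capturedIds = new ConcurrentHashMap<>();

  @Override
  public void afterCreate(EntryEvent<Object, Object> event) {
    // EntryEventImpl exposes the internal EventID assigned to this putAll entry.
    capturedIds.put(event.getKey(), ((EntryEventImpl) event).getEventId());
  }
}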
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class PutAllDUnitTest, the method assertThreadIdToSequenceIdMapHasEntryIds:
/** Asserts that the ThreadIdToSequenceId map is not null and returns the putAll event ids. **/
public static Object[] assertThreadIdToSequenceIdMapHasEntryIds() {
  EventID[] evids = new EventID[5];
  Map map = pool.getThreadIdToSequenceIdMap();
  assertNotNull(map);
  evids[0] = putAlleventId1;
  evids[1] = putAlleventId2;
  evids[2] = putAlleventId3;
  evids[3] = putAlleventId4;
  evids[4] = putAlleventId5;
  assertNotNull(evids[0]);
  assertNotNull(evids[1]);
  assertNotNull(evids[2]);
  assertNotNull(evids[3]);
  assertNotNull(evids[4]);
  return evids;
}
Use of org.apache.geode.internal.cache.EventID in project geode by apache.
From the class PutAllDUnitTest, the method assertThreadIdToSequenceIdMapHasEntryId:
/** Asserts that the ThreadIdToSequenceId map is not null and not empty, and returns the event id rebuilt from its last entry. **/
public static Object assertThreadIdToSequenceIdMapHasEntryId() {
  Map map = pool.getThreadIdToSequenceIdMap();
  assertNotNull(map);
  // The map size can now be 1 or 2 because of the server thread putting
  // the marker in the queue. If it is 2, the first entry is the server
  // thread; the second is the client thread. If it is 1, the entry is the
  // client thread. The size changes because of the map.clear call below.
  assertTrue(map.size() != 0);
  // Set the entry to the last entry
  Map.Entry entry = null;
  for (Iterator threadIdToSequenceIdMapIterator = map.entrySet().iterator();
      threadIdToSequenceIdMapIterator.hasNext();) {
    entry = (Map.Entry) threadIdToSequenceIdMapIterator.next();
  }
  ThreadIdentifier tid = (ThreadIdentifier) entry.getKey();
  SequenceIdAndExpirationObject seo = (SequenceIdAndExpirationObject) entry.getValue();
  long sequenceId = seo.getSequenceId();
  EventID evId = new EventID(tid.getMembershipID(), tid.getThreadID(), sequenceId);
  synchronized (map) {
    map.clear();
  }
  return evId;
}
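The helper rebuilds an EventID from the ThreadIdentifier key and the tracked sequence id of the map's last entry, which is the same (membershipID, threadID, sequenceID) triple the server stamped on the operation. A hedged sketch of how a caller might use the returned value (the assertion and variable names are assumptions about the test flow, not code from PutAllDUnitTest):

// The id captured on the originating side (e.g. by a cache listener, as sketched earlier)
// should equal the id the client pool reconstructed for duplicate detection; EventID's
// equals() is defined over its membership id, thread id and sequence id.
static void assertSameEvent(EventID listenerCapturedId, EventID poolReconstructedId) {
  org.junit.Assert.assertEquals(listenerCapturedId, poolReconstructedId);
}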