Use of org.apache.geode.internal.cache.DistributedPutAllOperation in project geode by apache.
The class DistTxEntryEvent, method putAllFromData.
/**
 * @param in the data input to read the put-all entries from
 * @throws IOException if reading from the data input fails
 * @throws ClassNotFoundException if a serialized class cannot be resolved
 */
private void putAllFromData(DataInput in) throws IOException, ClassNotFoundException {
  // read the entry count, then each PutAllEntryData, then an optional list of version tags
  int putAllSize = DataSerializer.readInteger(in);
  PutAllEntryData[] putAllEntries = new PutAllEntryData[putAllSize];
  if (putAllSize > 0) {
    final Version version = InternalDataSerializer.getVersionForDataStreamOrNull(in);
    final ByteArrayDataInput bytesIn = new ByteArrayDataInput();
    for (int i = 0; i < putAllSize; i++) {
      putAllEntries[i] = new PutAllEntryData(in, this.eventID, i, version, bytesIn);
    }
    boolean hasTags = in.readBoolean();
    if (hasTags) {
      EntryVersionsList versionTags = EntryVersionsList.create(in);
      for (int i = 0; i < putAllSize; i++) {
        putAllEntries[i].versionTag = versionTags.get(i);
      }
    }
  }
  // TODO DISTTX: release this event?
  EntryEventImpl e = EntryEventImpl.create(this.region, Operation.PUTALL_CREATE, null, null, null,
      true, this.getDistributedMember(), true, true);
  this.putAllOp = new DistributedPutAllOperation(e, putAllSize, false);
  this.putAllOp.setPutAllEntryData(putAllEntries);
}
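To make the layout that putAllFromData reads easier to follow, here is a minimal, self-contained sketch of the same "count, entries, optional version-tag list" wire shape using plain java.io streams. SimpleEntry and BulkOpWireFormat are hypothetical stand-ins, not Geode classes, and the field encodings are deliberately simplified.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for PutAllEntryData: a key, a value, and an optional version tag.
class SimpleEntry {
  final String key;
  final String value;
  Long versionTag; // null when no tag accompanies the entry

  SimpleEntry(String key, String value) {
    this.key = key;
    this.value = value;
  }
}

public class BulkOpWireFormat {

  // Writer side: entry count first, then each entry; a boolean flag and the tag list
  // follow only when the batch is non-empty.
  static void write(DataOutput out, List<SimpleEntry> entries) throws IOException {
    out.writeInt(entries.size());
    if (entries.isEmpty()) {
      return;
    }
    for (SimpleEntry e : entries) {
      out.writeUTF(e.key);
      out.writeUTF(e.value);
    }
    boolean hasTags = entries.stream().anyMatch(e -> e.versionTag != null);
    out.writeBoolean(hasTags);
    if (hasTags) {
      for (SimpleEntry e : entries) {
        out.writeLong(e.versionTag == null ? -1L : e.versionTag);
      }
    }
  }

  // Reader side: the mirror image of write(), shaped like putAllFromData() above.
  static List<SimpleEntry> read(DataInput in) throws IOException {
    int size = in.readInt();
    List<SimpleEntry> entries = new ArrayList<>(size);
    if (size > 0) {
      for (int i = 0; i < size; i++) {
        entries.add(new SimpleEntry(in.readUTF(), in.readUTF()));
      }
      if (in.readBoolean()) {
        for (int i = 0; i < size; i++) {
          long tag = in.readLong();
          entries.get(i).versionTag = (tag == -1L) ? null : tag;
        }
      }
    }
    return entries;
  }
}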
Use of org.apache.geode.internal.cache.DistributedPutAllOperation in project geode by apache.
The class PutAllPRMessage, method doLocalPutAll.
/**
 * This method is called either by operateOnPartitionedRegion() when processing a remote message,
 * or by sendMsgByBucket() when processing a message targeted at the local JVM. PartitionedRegion
 * note: it is very important that this message does NOT cause any deadlocks, as the sender will
 * wait indefinitely for the acknowledgment.
 *
 * @param r the partitioned region
 * @param eventSender the endpoint server that received the request from the client
 * @param lastModified timestamp of the last modification
 * @return true if the operation succeeds; otherwise an exception is thrown
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender,
    long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
  boolean didPut = false;
  long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
  if (r.hasServerProxy()) {
    clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
    if (logger.isDebugEnabled()) {
      logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
    }
  }
  DistributedPutAllOperation dpao = null;
  @Released EntryEventImpl baseEvent = null;
  BucketRegion bucketRegion = null;
  PartitionedRegionDataStore ds = r.getDataStore();
  InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
  try {
    if (!notificationOnly) {
      // bucketRegion is not null only when !notificationOnly
      bucketRegion = ds.getInitializedBucketForId(null, bucketId);
      this.versions = new VersionedObjectList(this.putAllPRDataSize, true,
          bucketRegion.getAttributes().getConcurrencyChecksEnabled());
      // create a base event and a DPAO for the PutAllMessage distributed between redundant buckets
      baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null,
          this.callbackArg, true, eventSender, !skipCallbacks, true);
      // set the base event id to the first entry's event id; we need the thread id for DACE
      baseEvent.setEventId(putAllPRData[0].getEventID());
      if (this.bridgeContext != null) {
        baseEvent.setContext(this.bridgeContext);
      }
      baseEvent.setPossibleDuplicate(this.posDup);
      if (logger.isDebugEnabled()) {
        logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}",
            eventSender, baseEvent, this);
      }
      dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
    }
    // To fix the updateMsg mis-order issue, lock the keys while doing postPutAll
    Object[] keys = new Object[putAllPRDataSize];
    for (int i = 0; i < putAllPRDataSize; ++i) {
      keys[i] = putAllPRData[i].getKey();
    }
    if (!notificationOnly) {
      try {
        if (putAllPRData.length > 0) {
          if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
            if (logger.isDebugEnabled()) {
              logger.debug("attempting to locate version tags for retried event");
            }
            // version tags may already have been generated by a previous attempt of this
            // possible-duplicate event, so try to recover them
            for (int i = 0; i < putAllPRDataSize; i++) {
              if (putAllPRData[i].versionTag == null) {
                putAllPRData[i].versionTag =
                    bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
                if (putAllPRData[i].versionTag != null) {
                  putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                }
              }
            }
          }
          EventID eventID = putAllPRData[0].getEventID();
          ThreadIdentifier membershipID =
              new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
          bucketRegion.recordBulkOpStart(membershipID, eventID);
        }
        bucketRegion.waitUntilLocked(keys);
        boolean lockedForPrimary = false;
        final HashMap succeeded = new HashMap();
        PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
        Object key = keys[0];
        try {
          bucketRegion.doLockForPrimary(false);
          lockedForPrimary = true;
          /*
           * The real work to be synchronized; it can take a long time. We don't worry about
           * another thread sending a message with the same key in this request, because such
           * requests will be blocked by foundKey.
           */
          for (int i = 0; i < putAllPRDataSize; i++) {
            @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData,
                notificationOnly, bridgeContext, posDup, skipCallbacks);
            try {
              key = ev.getKey();
              ev.setPutAllOperation(dpao);
              // make sure a local update inserts a cache de-serializable (serialized) new value
              ev.makeSerializedNewValue();
              // the ev is then added into the dpao in basicPutPart3()
              try {
                didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false,
                    lastModified, true);
                if (didPut && logger.isDebugEnabled()) {
                  logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
                }
              } catch (ConcurrentCacheModificationException e) {
                didPut = true;
                if (logger.isDebugEnabled()) {
                  logger.debug(
                      "PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}",
                      ev, e);
                }
              }
              putAllPRData[i].setTailKey(ev.getTailKey());
              if (!didPut) {
                // make sure the region hasn't gone away
                r.checkReadiness();
                ForceReattemptException fre = new ForceReattemptException(
                    "unable to perform put in PutAllPR, but operation should not fail");
                fre.setHash(ev.getKey().hashCode());
                throw fre;
              } else {
                succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
                this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
              }
            } finally {
              ev.release();
            }
          } // for
        } catch (IllegalMonitorStateException ignore) {
          throw new ForceReattemptException("unable to get lock for primary, retrying... ");
        } catch (CacheWriterException cwe) {
          // a CacheWriter threw an exception; record the failed key
          partialKeys.saveFailedKey(key, cwe);
        } finally {
          try {
            // Only PutAllPRMessage knows whether the thread id is fake; the event has no idea,
            // so we have to manually set useFakeEventId on this DPAO
            dpao.setUseFakeEventId(true);
            r.checkReadiness();
            bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
          } finally {
            if (lockedForPrimary) {
              bucketRegion.doUnlockForPrimary();
            }
          }
        }
        if (partialKeys.hasFailure()) {
          partialKeys.addKeysAndVersions(this.versions);
          if (logger.isDebugEnabled()) {
            logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}",
                bucketId, Arrays.toString(keys), succeeded);
          }
          throw new PutAllPartialResultException(partialKeys);
        }
      } catch (RegionDestroyedException e) {
        ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
      } finally {
        bucketRegion.removeAndNotifyKeys(keys);
      }
    } else {
      for (int i = 0; i < putAllPRDataSize; i++) {
        EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData,
            notificationOnly, bridgeContext, posDup, skipCallbacks);
        try {
          ev.setOriginRemote(true);
          if (this.callbackArg != null) {
            ev.setCallbackArgument(this.callbackArg);
          }
          r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE
              : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
        } finally {
          ev.release();
        }
      }
    }
  } finally {
    if (baseEvent != null)
      baseEvent.release();
    if (dpao != null)
      dpao.freeOffHeapResources();
  }
  return true;
}
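For context, a bulk put like the one processed above normally originates from the public Region.putAll API; on a partitioned region the batch is split by bucket into PutAllPRMessages, which is how doLocalPutAll ends up running on the member hosting each bucket. A minimal usage sketch (region name and values are illustrative, not taken from the code above):

import java.util.HashMap;
import java.util.Map;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class PutAllExample {
  public static void main(String[] args) {
    // Start a peer cache and create a partitioned region.
    Cache cache = new CacheFactory().create();
    Region<String, String> region =
        cache.<String, String>createRegionFactory(RegionShortcut.PARTITION).create("example");

    // One putAll call; the partitioned region groups the entries by bucket and
    // distributes them to the bucket owners.
    Map<String, String> batch = new HashMap<>();
    batch.put("key-1", "value-1");
    batch.put("key-2", "value-2");
    region.putAll(batch);

    cache.close();
  }
}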
Use of org.apache.geode.internal.cache.DistributedPutAllOperation in project geode by apache.
The class ElidedPutAllDUnitTest, method testElidedPutAllOnPR.
/**
* bug #47425 - elided putAll event causes PutAllPartialResultException
*/
@Test
public void testElidedPutAllOnPR() throws Exception {
  final String regionName = getUniqueName() + "Region";
  final String key = "key-1";
  Cache cache = getCache();
  PartitionedRegion region =
      (PartitionedRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
  region.put(key, "value-1");
  region.put(key, "value-2");
  Entry<?, ?> entry = region.getEntry(key);
  assertTrue("expected entry to be in this vm", entry != null);
  VM vm1 = Host.getHost(0).getVM(1);
  vm1.invoke(new SerializableRunnable("perform conflicting update") {
    @Override
    public void run() {
      Cache cache = getCache();
      PartitionedRegion region =
          (PartitionedRegion) cache.createRegionFactory(RegionShortcut.PARTITION).create(regionName);
      try {
        Entry<?, ?> entry = region.getEntry(key);
        assertTrue(entry instanceof EntrySnapshot);
        RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
        final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
        tag.setEntryVersion(tag.getEntryVersion() - 1);
        tag.setRegionVersion(1);
        Map<String, String> map = new HashMap<String, String>();
        map.put(key, "value-3");
        DistributedPutAllOperation dpao = region.newPutAllOperation(map, null);
        EntryEventImpl event = EntryEventImpl.create(region, Operation.PUTALL_CREATE, null, null,
            null, true, (DistributedMember) tag.getMemberID());
        event.setOldValue("value-1");
        event.setVersionTag(tag);
        event.setEventId(new EventID(cache.getDistributedSystem()));
        event.setKeyInfo(((PartitionedRegion) region).getKeyInfo(key));
        dpao.addEntry(event, event.getKeyInfo().getBucketId());
        // getLogWriter().info("dpao data = " + dpao.getPutAllEntryData()[0]);
        VersionedObjectList successfulPuts = new VersionedObjectList(1, true, true);
        successfulPuts.addKeyAndVersion(key, tag);
        try {
          region.postPutAllSend(dpao, successfulPuts);
        } catch (ConcurrentCacheModificationException e) {
          Assert.fail("Should not have received an exception for an elided operation", e);
        } finally {
          event.release();
          dpao.getBaseEvent().release();
          dpao.freeOffHeapResources();
        }
      } catch (Exception e) {
        Assert.fail("caught unexpected exception", e);
      }
    }
  });
  entry = region.getEntry(key);
  assertTrue("expected value-2: " + entry.getValue(), entry.getValue().equals("value-2"));
  RegionEntry regionEntry = ((EntrySnapshot) entry).getRegionEntry();
  final VersionTag<?> tag = regionEntry.getVersionStamp().asVersionTag();
  assertTrue(tag.getEntryVersion() == 2);
}
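The test above relies on the region's version-tag consistency checks to silently drop, rather than fail on, a putAll entry that has already been superseded. The following is a deliberately simplified, hypothetical sketch of that elision decision (Geode's real comparison also involves member IDs and timestamps, not just the entry version); it only illustrates why rewinding the entry version makes the incoming "value-3" get elided while the region keeps "value-2" at entry version 2.

public class ElisionCheckSketch {

  // Hypothetical, simplified rule: an incoming change is applied only if its entry
  // version is newer than what the region already holds; otherwise it is elided.
  static boolean shouldApply(int existingEntryVersion, int incomingEntryVersion) {
    return incomingEntryVersion > existingEntryVersion;
  }

  public static void main(String[] args) {
    int existing = 2;            // "value-2" was the second update, so entry version 2
    int incoming = existing - 1; // the test rewinds the tag to mimic a stale concurrent put

    if (!shouldApply(existing, incoming)) {
      // The stale entry is skipped quietly; it must not surface as a
      // PutAllPartialResultException or ConcurrentCacheModificationException.
      System.out.println("elided stale update, keeping the existing value");
    }
  }
}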