Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache: class DistTXState, method applyIndividualOp.
/**
 * Applies a single transactional operation (create/update/destroy/invalidate, including the
 * bulk putAll/removeAll forms) described by {@code dtop} on this member.
 *
 * NOTE(review): the previous javadoc claimed this calls local functions such as putEntry
 * "instead of putEntryOnRemote", but the code below calls the remote variants
 * (putEntryOnRemote, destroyOnRemote, invalidateOnRemote). Presumably the subclass
 * {@link DistTXStateOnCoordinator} overrides this with the local variants because its events
 * are always local — confirm against that class.
 *
 * @param dtop the distributed-TX entry event describing the operation to apply
 * @return true unless putEntryOnRemote reports failure; the bulk (putAll/removeAll) and
 *         destroy/invalidate paths never change the initial true
 * @throws DataLocationException if the target data is not hosted on this member
 */
protected boolean applyIndividualOp(DistTxEntryEvent dtop) throws DataLocationException {
  boolean result = true;
  if (dtop.op.isUpdate() || dtop.op.isCreate()) {
    if (dtop.op.isPutAll()) {
      assert (dtop.getPutAllOperation() != null);
      // [DISTTX] TODO what to do with versions next?
      final VersionedObjectList versions = new VersionedObjectList(dtop.getPutAllOperation().putAllDataSize, true, dtop.region.concurrencyChecksEnabled);
      postPutAll(dtop.getPutAllOperation(), versions, dtop.region);
    } else {
      // single put: only this path can change `result`
      result = putEntryOnRemote(dtop, false, /* ifNew */
          false, /* ifOld */
          null, /* expectedOldValue */
          false, /* requireOldValue */
          0L, /* lastModified */
          true);
    }
  } else if (dtop.op.isDestroy()) {
    if (dtop.op.isRemoveAll()) {
      assert (dtop.getRemoveAllOperation() != null);
      // [DISTTX] TODO what to do with versions next?
      final VersionedObjectList versions = new VersionedObjectList(dtop.getRemoveAllOperation().removeAllDataSize, true, dtop.region.concurrencyChecksEnabled);
      postRemoveAll(dtop.getRemoveAllOperation(), versions, dtop.region);
    } else {
      destroyOnRemote(dtop, false, /* TODO [DISTTX] */
          null);
    }
  } else if (dtop.op.isInvalidate()) {
    invalidateOnRemote(dtop, true, /* TODO [DISTTX] */
        false);
  } else {
    logger.debug("DistTXCommitPhaseOneMessage: unsupported TX operation {}", dtop);
    // NOTE(review): assertions are disabled in production, so an unsupported op
    // falls through here and still returns true — confirm this is intended.
    assert (false);
  }
  return result;
}
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache: class RemoteRemoveAllMessage, method doLocalRemoveAll.
/* we need an event with content for waitForNodeOrCreateBucket() */
/**
 * Applies this message's removeAll on the local member.
 *
 * This method is called by both operateOnLocalRegion() when processing a remote msg or by
 * sendMsgByBucket() when processing a msg targeted to local Jvm. LocalRegion Note: It is very
 * important that this message does NOT cause any deadlocks as the sender will wait indefinitely
 * for the acknowledgment
 *
 * @param r partitioned region
 * @param eventSender the endpoint server who received request from client
 * @return always false. NOTE(review): the previous doc said "If succeeds, return true,
 *         otherwise, throw exception", but the code returns false after sending the reply
 *         via RemoveAllReplyMessage.send() — confirm that callers ignore this return value.
 * @throws EntryExistsException propagated from the underlying destroy operations
 * @throws RemoteOperationException if the operation fails on this member
 */
public boolean doLocalRemoveAll(final LocalRegion r, final InternalDistributedMember eventSender) throws EntryExistsException, RemoteOperationException {
  final DistributedRegion dr = (DistributedRegion) r;
  // create a base event and an op for RemoveAllMessage distributed btw redundant buckets
  @Released EntryEventImpl baseEvent = EntryEventImpl.create(r, Operation.REMOVEALL_DESTROY, null, null, this.callbackArg, false, eventSender, true);
  try {
    baseEvent.setCausedByMessage(this);
    // set baseEventId to the first entry's event id. We need the thread id for DACE
    baseEvent.setEventId(this.eventId);
    if (this.bridgeContext != null) {
      baseEvent.setContext(this.bridgeContext);
    }
    baseEvent.setPossibleDuplicate(this.posDup);
    if (logger.isDebugEnabled()) {
      logger.debug("RemoteRemoveAllMessage.doLocalRemoveAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
    }
    // one distribution op is shared by all per-entry events; its off-heap
    // resources are freed in the inner finally below
    final DistributedRemoveAllOperation op = new DistributedRemoveAllOperation(baseEvent, removeAllDataCount, false);
    try {
      final VersionedObjectList versions = new VersionedObjectList(removeAllDataCount, true, dr.concurrencyChecksEnabled);
      // run all destroys under the region's bulk-op synchronization for this event id
      dr.syncBulkOp(new Runnable() {
        @SuppressWarnings("synthetic-access")
        public void run() {
          InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
          for (int i = 0; i < removeAllDataCount; ++i) {
            @Released EntryEventImpl ev = RemoveAllPRMessage.getEventFromEntry(r, myId, eventSender, i, removeAllData, false, bridgeContext, posDup, false);
            try {
              ev.setRemoveAllOperation(op);
              if (logger.isDebugEnabled()) {
                logger.debug("invoking basicDestroy with {}", ev);
              }
              try {
                dr.basicDestroy(ev, true, null);
              } catch (EntryNotFoundException ignore) {
                // removeAll tolerates entries that are already gone
              }
              // record the resulting version tag for the reply to the sender
              removeAllData[i].versionTag = ev.getVersionTag();
              versions.addKeyAndVersion(removeAllData[i].key, ev.getVersionTag());
            } finally {
              ev.release();
            }
          }
        }
      }, baseEvent.getEventId());
      // inside a TX, or when concurrency checks are on, run the post-removeAll
      // hook through the data view so TX/version state is updated
      if (getTXUniqId() != TXManagerImpl.NOTX || dr.getConcurrencyChecksEnabled()) {
        dr.getDataView().postRemoveAll(op, versions, dr);
      }
      RemoveAllReplyMessage.send(getSender(), this.processorId, getReplySender(r.getDistributionManager()), versions, this.removeAllData, this.removeAllDataCount);
      return false;
    } finally {
      op.freeOffHeapResources();
    }
  } finally {
    baseEvent.release();
  }
}
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache: class RemoteRemoveAllMessage, method distribute.
/*
 * This is similar to send(), but it selects an initialized replicate that is used to proxy
 * the message. Tries each replicate in (shuffled) order until one accepts the removeAll;
 * successful version tags are copied back into the RemoveAllEntryData array.
 *
 * Returns true if a replicate applied the op (or a non-cancel CacheException made further
 * complaints pointless), false if no initialized replicate exists or all attempts failed.
 */
public static boolean distribute(EntryEventImpl event, RemoveAllEntryData[] data, int dataCount) {
  boolean successful = false;
  DistributedRegion r = (DistributedRegion) event.getRegion();
  Collection replicates = r.getCacheDistributionAdvisor().adviseInitializedReplicates();
  if (replicates.isEmpty()) {
    return false;
  }
  // Randomize candidate order so retries spread load across replicates.
  if (replicates.size() > 1) {
    ArrayList l = new ArrayList(replicates);
    Collections.shuffle(l);
    replicates = l;
  }
  int attempts = 0;
  for (Iterator<InternalDistributedMember> it = replicates.iterator(); it.hasNext(); ) {
    InternalDistributedMember replicate = it.next();
    try {
      attempts++;
      // Any attempt after the first may duplicate a partially-applied op.
      final boolean posDup = (attempts > 1);
      RemoveAllResponse response = send(replicate, event, data, dataCount, false, DistributionManager.SERIAL_EXECUTOR, posDup);
      response.waitForCacheException();
      VersionedObjectList result = response.getResponse();
      // Set successful version tags in RemoveAllEntryData.
      List successfulKeys = result.getKeys();
      List<VersionTag> versions = result.getVersionTags();
      for (RemoveAllEntryData removeAllEntry : data) {
        // Single indexOf scan per entry (was contains() followed by indexOf(),
        // i.e. two linear scans with identical semantics).
        int index = successfulKeys.indexOf(removeAllEntry.getKey());
        if (index >= 0) {
          removeAllEntry.versionTag = versions.get(index);
        }
      }
      return true;
    } catch (TransactionDataNotColocatedException enfe) {
      throw enfe;
    } catch (CancelException e) {
      event.getRegion().getCancelCriterion().checkCancelInProgress(e);
    } catch (CacheException e) {
      if (logger.isDebugEnabled()) {
        logger.debug("RemoteRemoveAllMessage caught CacheException during distribution", e);
      }
      // not a cancel-exception, so don't complain any more about it
      successful = true;
    } catch (RemoteOperationException e) {
      if (logger.isTraceEnabled(LogMarker.DM)) {
        logger.trace(LogMarker.DM, "RemoteRemoveAllMessage caught an unexpected exception during distribution", e);
      }
    }
  }
  return successful;
}
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache: class RemoveAllOp, method execute.
/**
 * Does a region removeAll on a server using connections from the given pool to communicate
 * with the server. (Previous doc said "put"; this op removes the given keys.)
 *
 * When single-hop metadata is available the keys are partitioned per server and submitted
 * as parallel tasks; failed servers are retried one sub-map at a time so each sub-map keeps
 * its event ids.
 *
 * @param pool the pool to use to communicate with the server.
 * @param region the name of the region to do the removeAll on
 * @param keys the Collection of keys to remove
 * @param eventId the event id for this removeAll
 * @param retryAttempts number of retries allowed; 0 means rethrow the first server failure
 * @param callbackArg opaque callback argument passed through to server-side listeners
 * @return the keys and version tags that were successfully removed
 * @throws PutAllPartialResultException if some keys failed after retries
 */
public static VersionedObjectList execute(ExecutablePool pool, Region region, Collection<Object> keys, EventID eventId, int retryAttempts, Object callbackArg) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  ClientMetadataService cms = ((LocalRegion) region).getCache().getClientMetadataService();
  Map<ServerLocation, HashSet> serverToFilterMap = cms.getServerToFilterMap(keys, region, true);
  // No single-hop metadata: send the whole removeAll through the pool as one op.
  if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
    AbstractOp op = new RemoveAllOpImpl(region, keys, eventId, ((PoolImpl) pool).getPRSingleHopEnabled(), callbackArg);
    op.initMessagePart();
    return (VersionedObjectList) pool.execute(op);
  }
  // Single-hop path: one task per server, each carrying only that server's keys.
  List callableTasks = constructAndGetRemoveAllTasks(region, eventId, serverToFilterMap, (PoolImpl) pool, callbackArg);
  if (isDebugEnabled) {
    logger.debug("RemoveAllOp#execute : Number of removeAll tasks is :{}", callableTasks.size());
  }
  HashMap<ServerLocation, RuntimeException> failedServers = new HashMap<ServerLocation, RuntimeException>();
  PutAllPartialResult result = new PutAllPartialResult(keys.size());
  try {
    // Runs the per-server tasks; failures are recorded into failedServers.
    Map<ServerLocation, Object> results = SingleHopClientExecutor.submitBulkOp(callableTasks, cms, (LocalRegion) region, failedServers);
    for (Map.Entry<ServerLocation, Object> entry : results.entrySet()) {
      Object value = entry.getValue();
      if (value instanceof PutAllPartialResultException) {
        PutAllPartialResultException pap = (PutAllPartialResultException) value;
        if (isDebugEnabled) {
          logger.debug("RemoveAll SingleHop encountered BulkOpPartialResultException exception: {}, failedServers are {}", pap, failedServers.keySet());
        }
        result.consolidate(pap.getResult());
      } else {
        if (value != null) {
          VersionedObjectList list = (VersionedObjectList) value;
          result.addKeysAndVersions(list);
        }
      }
    }
  } catch (RuntimeException ex) {
    logger.debug("single-hop removeAll encountered unexpected exception: {}", ex);
    throw ex;
  }
  if (!failedServers.isEmpty()) {
    // With no retry budget, surface the first recorded server failure directly.
    if (retryAttempts == 0) {
      throw failedServers.values().iterator().next();
    }
    // add them to the partial result set
    if (result.getSucceededKeysAndVersions().size() == 0) {
      // if there're failed servers, we need to save the succeed keys in submitRemoveAll
      // if retry succeeded, everything is ok, otherwise, the saved "succeeded
      // keys" should be consolidated into PutAllPartialResultException
      // succeedKeySet is used to send back to client in PartialResult case
      // so it's not a must to use LinkedHashSet
      Set succeedKeySet = new LinkedHashSet();
      Set<ServerLocation> serverSet = serverToFilterMap.keySet();
      for (ServerLocation server : serverSet) {
        if (!failedServers.containsKey(server)) {
          succeedKeySet.addAll(serverToFilterMap.get(server));
        }
      }
      // save succeedKeys, but if retries all succeeded, discard the PutAllPartialResult
      result.addKeys(succeedKeySet);
    }
    // send maps for the failed servers one by one instead of merging
    // them into one big map. The reason is, we have to keep the same event
    // ids for each sub map. There is a unit test in PutAllCSDUnitTest for
    // the otherwise case.
    boolean oneSubMapRetryFailed = false;
    Set<ServerLocation> failedServerSet = failedServers.keySet();
    for (ServerLocation failedServer : failedServerSet) {
      // Throwable failedServers.values().iterator().next();
      RuntimeException savedRTE = failedServers.get(failedServer);
      if (savedRTE instanceof PutAllPartialResultException) {
        // will not retry for BulkOpPartialResultException
        // but it means at least one sub map ever failed
        oneSubMapRetryFailed = true;
        continue;
      }
      Collection<Object> newKeys = serverToFilterMap.get(failedServer);
      try {
        // NOTE(review): this calls a different overload — the boolean argument is
        // presumably an isRetry flag, not retryAttempts; confirm against the overload set.
        VersionedObjectList v = RemoveAllOp.execute(pool, region, newKeys, eventId, true, callbackArg);
        if (v == null) {
          // null result means no versions (concurrency checks off); keys still succeeded
          result.addKeys(newKeys);
        } else {
          result.addKeysAndVersions(v);
        }
      } catch (PutAllPartialResultException pre) {
        oneSubMapRetryFailed = true;
        logger.debug("Retry failed with BulkOpPartialResultException: {} Before retry: {}", pre, result.getKeyListString());
        result.consolidate(pre.getResult());
      } catch (Exception rte) {
        // record the sub-map's first key as the failed key for this exception
        oneSubMapRetryFailed = true;
        Object firstKey = newKeys.iterator().next();
        result.saveFailedKey(firstKey, rte);
      }
    }
    // If all retries succeeded, the PRE in first tries can be ignored
    if (oneSubMapRetryFailed && result.hasFailure()) {
      PutAllPartialResultException pre = new PutAllPartialResultException(result);
      throw pre;
    }
  }
  return result.getSucceededKeysAndVersions();
}
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache: class PartitionedRegion, method postPutAllSend.
/**
 * Create PutAllPRMsgs for each bucket, and send them.
 *
 * @param putAllOp DistributedPutAllOperation object describing all entries to put.
 * @param successfulPuts cleared of versions on entry, then refilled with the version tag
 *        for each of its keys on success. (The old "not used in PartitionedRegion" note
 *        was stale — this method both reads its keys and updates its versions.)
 * @return -1 always (no version timestamp to propagate at this level)
 * @throws PutAllPartialResultException for bridge ops when some keys failed
 * @throws CancelException / RuntimeException wrapping the saved failure otherwise
 */
@Override
public long postPutAllSend(DistributedPutAllOperation putAllOp, VersionedObjectList successfulPuts) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (cache.isCacheAtShutdownAll()) {
    throw new CacheClosedException("Cache is shutting down");
  }
  final long startTime = PartitionedRegionStats.startTime();
  // build all the msgs by bucketid
  HashMap prMsgMap = putAllOp.createPRMessages();
  PutAllPartialResult partialKeys = new PutAllPartialResult(putAllOp.putAllDataSize);
  // clear the successfulPuts list since we're actually doing the puts here
  // and the basicPutAll work was just a way to build the DPAO object
  Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulPuts.size());
  successfulPuts.clearVersions();
  // enhanced-for over the entry set (same iteration as the former raw Iterator loop)
  for (Object o : prMsgMap.entrySet()) {
    Map.Entry mapEntry = (Map.Entry) o;
    Integer bucketId = (Integer) mapEntry.getKey();
    PutAllPRMessage prMsg = (PutAllPRMessage) mapEntry.getValue();
    checkReadiness();
    long then = 0;
    if (isDebugEnabled) {
      then = System.currentTimeMillis();
    }
    try {
      VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
      if (versions.size() > 0) {
        partialKeys.addKeysAndVersions(versions);
        versions.saveVersions(keyToVersionMap);
      } else if (!this.concurrencyChecksEnabled) {
        // no keys returned if not versioned
        Set keys = prMsg.getKeys();
        partialKeys.addKeys(keys);
      }
    } catch (PutAllPartialResultException pre) {
      // sendMsgByBucket applied partial keys
      if (isDebugEnabled) {
        logger.debug("PR.postPutAll encountered PutAllPartialResultException, ", pre);
      }
      partialKeys.consolidate(pre.getResult());
    } catch (Exception ex) {
      // If failed at other exception: record this bucket's first key as the failed key
      if (isDebugEnabled) {
        logger.debug("PR.postPutAll encountered exception at sendMsgByBucket, ", ex);
      }
      @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
      try {
        partialKeys.saveFailedKey(firstEvent.getKey(), ex);
      } finally {
        firstEvent.release();
      }
    }
    if (isDebugEnabled) {
      long now = System.currentTimeMillis();
      if ((now - then) >= 10000) {
        // parameterized message instead of string concatenation
        logger.debug("PR.sendMsgByBucket took {} ms", now - then);
      }
    }
  }
  this.prStats.endPutAll(startTime);
  if (!keyToVersionMap.isEmpty()) {
    // re-attach versions to successfulPuts in its original key order
    for (Object key : successfulPuts.getKeys()) {
      successfulPuts.addVersion(keyToVersionMap.get(key));
    }
    keyToVersionMap.clear();
  }
  if (partialKeys.hasFailure()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
    if (putAllOp.isBridgeOperation()) {
      if (partialKeys.getFailure() instanceof CancelException) {
        throw (CancelException) partialKeys.getFailure();
      } else {
        throw new PutAllPartialResultException(partialKeys);
      }
    } else {
      if (partialKeys.getFailure() instanceof RuntimeException) {
        throw (RuntimeException) partialKeys.getFailure();
      } else {
        throw new RuntimeException(partialKeys.getFailure());
      }
    }
  }
  return -1;
}
Aggregations