Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The class PutAllOp, method execute.
/**
* Does a region putAll on a server using connections from the given pool to communicate with the
* server.
*
* @param pool the pool to use to communicate with the server
* @param region the region to do the putAll on
* @param map the Map of keys and values to put
* @param eventId the event id for this putAll
* @param skipCallbacks whether callbacks should be skipped for this putAll
* @param retryAttempts the number of times failed servers may be retried
* @param callbackArg the callback argument for this operation
* @return the keys and versions of the entries that were successfully put
*/
public static VersionedObjectList execute(ExecutablePool pool, Region region, Map map, EventID eventId, boolean skipCallbacks, int retryAttempts, Object callbackArg) {
ClientMetadataService cms = ((LocalRegion) region).getCache().getClientMetadataService();
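// group the keys by the server expected to host them (single-hop metadata);
// if no mapping is available, fall back to a single putAll op on the pool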
Map<ServerLocation, HashSet> serverToFilterMap = cms.getServerToFilterMap(map.keySet(), region, true);
if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
AbstractOp op = new PutAllOpImpl(region, map, eventId, ((PoolImpl) pool).getPRSingleHopEnabled(), skipCallbacks, callbackArg);
op.initMessagePart();
return (VersionedObjectList) pool.execute(op);
}
List callableTasks = constructAndGetPutAllTasks(region, map, eventId, skipCallbacks, serverToFilterMap, (PoolImpl) pool, callbackArg);
final boolean isDebugEnabled = logger.isDebugEnabled();
if (isDebugEnabled) {
logger.debug("PutAllOp#execute : Number of putAll tasks is : {}", callableTasks.size());
}
HashMap<ServerLocation, RuntimeException> failedServers = new HashMap<ServerLocation, RuntimeException>();
PutAllPartialResult result = new PutAllPartialResult(map.size());
try {
Map<ServerLocation, Object> results = SingleHopClientExecutor.submitBulkOp(callableTasks, cms, (LocalRegion) region, failedServers);
for (Map.Entry<ServerLocation, Object> entry : results.entrySet()) {
Object value = entry.getValue();
if (value instanceof PutAllPartialResultException) {
PutAllPartialResultException pap = (PutAllPartialResultException) value;
if (isDebugEnabled) {
logger.debug("PutAll SingleHop encountered PutAllPartialResultException exception: {}, failedServers are {}", pap, failedServers.keySet());
}
result.consolidate(pap.getResult());
} else {
if (value != null) {
VersionedObjectList list = (VersionedObjectList) value;
result.addKeysAndVersions(list);
}
}
}
} catch (RuntimeException ex) {
if (isDebugEnabled) {
logger.debug("single-hop putAll encountered unexpected exception: ", ex);
}
throw ex;
}
if (!failedServers.isEmpty()) {
if (retryAttempts == 0) {
throw failedServers.values().iterator().next();
}
// add them to the partial result set
if (result.getSucceededKeysAndVersions().size() == 0) {
// if there are failed servers, we need to save the keys that succeeded in submitPutAll.
// If the retry succeeds, everything is fine; otherwise the saved "succeeded
// keys" are consolidated into the PutAllPartialResultException.
// succeedKeySet is only sent back to the client in the partial-result case,
// so a LinkedHashSet is not strictly required.
Set succeedKeySet = new LinkedHashSet();
Set<ServerLocation> serverSet = serverToFilterMap.keySet();
for (ServerLocation server : serverSet) {
if (!failedServers.containsKey(server)) {
succeedKeySet.addAll(serverToFilterMap.get(server));
}
}
// save the succeeded keys; if all retries succeed, the PutAllPartialResult is discarded
result.addKeys(succeedKeySet);
}
// Send a separate map for each failed server instead of merging them into
// one big map, because each sub map has to keep its original event ids.
// PutAllCSDUnitTest has a unit test covering the merged-map case.
boolean oneSubMapRetryFailed = false;
Set<ServerLocation> failedServerSet = failedServers.keySet();
for (ServerLocation failedServer : failedServerSet) {
// Throwable failedServers.values().iterator().next();
RuntimeException savedRTE = failedServers.get(failedServer);
if (savedRTE instanceof PutAllPartialResultException) {
// do not retry for PutAllPartialResultException,
// but it does mean that at least one sub map failed
oneSubMapRetryFailed = true;
continue;
}
Map newMap = new LinkedHashMap();
Set keySet = serverToFilterMap.get(failedServer);
for (Object key : keySet) {
newMap.put(key, map.get(key));
}
try {
VersionedObjectList v = PutAllOp.execute(pool, region, newMap, eventId, skipCallbacks, true, callbackArg);
if (v == null) {
result.addKeys(keySet);
} else {
result.addKeysAndVersions(v);
}
} catch (PutAllPartialResultException pre) {
oneSubMapRetryFailed = true;
if (logger.isDebugEnabled()) {
logger.debug("Retry failed with PutAllPartialResultException: {} Before retry: {}", pre, result.getKeyListString());
}
result.consolidate(pre.getResult());
} catch (Exception rte) {
oneSubMapRetryFailed = true;
Object firstKey = newMap.keySet().iterator().next();
result.saveFailedKey(firstKey, rte);
}
}
// If all retries succeeded, the PRE in first tries can be ignored
if (oneSubMapRetryFailed && result.hasFailure()) {
PutAllPartialResultException pre = new PutAllPartialResultException(result);
throw pre;
}
}
return result.getSucceededKeysAndVersions();
}
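For orientation, here is a minimal caller-side sketch of this method. The pool, region, and event id are illustrative assumptions supplied elsewhere in the client; this is not the actual Geode call site.
// Hypothetical usage sketch: `pool`, `region`, and `eventId` are assumed to be
// provided by the client cache internals and are not constructed here.
Map<String, String> entries = new LinkedHashMap<>();
entries.put("key1", "value1");
entries.put("key2", "value2");
// retryAttempts > 0 lets failed single-hop sub maps be retried; a
// PutAllPartialResultException is thrown if a sub map still fails after retry.
VersionedObjectList versions = PutAllOp.execute(pool, region, entries, eventId,
    false /* skipCallbacks */, 1 /* retryAttempts */, null /* callbackArg */);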
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The class GetAllOp, method execute.
public static VersionedObjectList execute(ExecutablePool pool, Region region, List keys, int retryAttempts, Object callback) {
AbstractOp op = new GetAllOpImpl(region.getFullPath(), keys, callback);
ClientMetadataService cms = ((LocalRegion) region).getCache().getClientMetadataService();
Map<ServerLocation, HashSet> serverToFilterMap = cms.getServerToFilterMap(keys, region, true);
if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
op.initMessagePart();
return ((VersionedObjectList) pool.execute(op)).setKeys(keys);
} else {
VersionedObjectList result = null;
ServerConnectivityException se = null;
List retryList = new ArrayList();
List callableTasks = constructGetAllTasks(region.getFullPath(), serverToFilterMap, (PoolImpl) pool, callback);
Map<ServerLocation, Object> results = SingleHopClientExecutor.submitGetAll(serverToFilterMap, callableTasks, cms, (LocalRegion) region);
for (ServerLocation server : results.keySet()) {
Object serverResult = results.get(server);
if (serverResult instanceof ServerConnectivityException) {
se = (ServerConnectivityException) serverResult;
retryList.addAll(serverToFilterMap.get(server));
} else {
if (result == null) {
result = (VersionedObjectList) serverResult;
} else {
result.addAll((VersionedObjectList) serverResult);
}
}
}
if (se != null) {
if (retryAttempts == 0) {
throw se;
} else {
VersionedObjectList retryResult = GetAllOp.execute(pool, region.getFullPath(), retryList, callback);
if (result == null) {
result = retryResult;
} else {
result.addAll(retryResult);
}
}
}
return result;
}
}
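A similar hedged caller sketch for the single-hop getAll path; `pool` and `region` are again assumptions obtained from the client cache rather than constructed here.
// Hypothetical usage sketch; `pool` and `region` are assumed.
List keys = Arrays.asList("key1", "key2", "key3");
// With retryAttempts > 0, keys that hit a ServerConnectivityException are
// retried through the overload that takes the region path instead of the Region.
VersionedObjectList values = GetAllOp.execute(pool, region, keys, 1 /* retryAttempts */, null /* callback */);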
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The class DistTXStateOnCoordinator, method applyIndividualOp.
/**
* {@inheritDoc}
*/
@Override
protected boolean applyIndividualOp(DistTxEntryEvent dtop) throws DataLocationException {
boolean result = true;
if (dtop.op.isUpdate() || dtop.op.isCreate()) {
if (dtop.op.isPutAll()) {
assert (dtop.getPutAllOperation() != null);
// [DISTTX] TODO: what to do with the versions next?
final VersionedObjectList versions = new VersionedObjectList(dtop.getPutAllOperation().putAllDataSize, true, dtop.region.concurrencyChecksEnabled);
postPutAll(dtop.getPutAllOperation(), versions, dtop.region);
} else {
result = putEntry(dtop, false, /* ifNew */
false, /* ifOld */
null, /* expectedOldValue */
false, /* requireOldValue */
0L, /* lastModified */
true);
}
} else if (dtop.op.isDestroy()) {
if (dtop.op.isRemoveAll()) {
assert (dtop.getRemoveAllOperation() != null);
// [DISTTX] TODO: what to do with the versions next?
final VersionedObjectList versions = new VersionedObjectList(dtop.getRemoveAllOperation().removeAllDataSize, true, dtop.region.concurrencyChecksEnabled);
postRemoveAll(dtop.getRemoveAllOperation(), versions, dtop.region);
} else {
destroyExistingEntry(dtop, false, /* TODO [DISTTX] */
null);
}
} else if (dtop.op.isInvalidate()) {
invalidateExistingEntry(dtop, true, /* TODO [DISTTX] */
false);
} else {
logger.debug("DistTXCommitPhaseOneMessage: unsupported TX operation {}", dtop);
assert (false);
}
return result;
}
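Both bulk branches above build the version list the same way; a condensed sketch of that pattern follows, with the argument meanings inferred from the calls shown here (an assumption, not the documented VersionedObjectList contract) and `putAllOp` and `region` standing in for dtop.getPutAllOperation() and dtop.region.
// Inferred meanings (assumption): expected entry count, object form flag,
// and whether the region performs concurrency (version) checks.
VersionedObjectList versions = new VersionedObjectList(
    putAllOp.putAllDataSize, true, region.concurrencyChecksEnabled);
postPutAll(putAllOp, versions, region);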
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The class PartitionedRegion, method postRemoveAllSend.
@Override
public long postRemoveAllSend(DistributedRemoveAllOperation op, VersionedObjectList successfulOps) {
final boolean isDebugEnabled = logger.isDebugEnabled();
if (cache.isCacheAtShutdownAll()) {
throw new CacheClosedException("Cache is shutting down");
}
final long startTime = PartitionedRegionStats.startTime();
// build all the msgs by bucketid
HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
// clear the successfulOps list since we're actually doing the removes here
// and the basicRemoveAll work was just a way to build the "op" object
Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulOps.size());
successfulOps.clearVersions();
Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
while (itor.hasNext()) {
Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
Integer bucketId = (Integer) mapEntry.getKey();
RemoveAllPRMessage prMsg = mapEntry.getValue();
checkReadiness();
long then = 0;
if (isDebugEnabled) {
then = System.currentTimeMillis();
}
try {
VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
if (versions.size() > 0) {
partialKeys.addKeysAndVersions(versions);
versions.saveVersions(keyToVersionMap);
} else if (!this.concurrencyChecksEnabled) {
// no keys returned if not versioned
Set keys = prMsg.getKeys();
partialKeys.addKeys(keys);
}
} catch (PutAllPartialResultException pre) {
// sendMsgByBucket applied partial keys
if (isDebugEnabled) {
logger.debug("PR.postRemoveAll encountered BulkOpPartialResultException, ", pre);
}
partialKeys.consolidate(pre.getResult());
} catch (Exception ex) {
// failed with some other exception
if (isDebugEnabled) {
logger.debug("PR.postRemoveAll encountered exception at sendMsgByBucket, ", ex);
}
@Released EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
try {
partialKeys.saveFailedKey(firstEvent.getKey(), ex);
} finally {
firstEvent.release();
}
}
if (isDebugEnabled) {
long now = System.currentTimeMillis();
if ((now - then) >= 10000) {
logger.debug("PR.sendMsgByBucket took {} ms", (now - then));
}
}
}
this.prStats.endRemoveAll(startTime);
if (!keyToVersionMap.isEmpty()) {
for (Iterator it = successfulOps.getKeys().iterator(); it.hasNext(); ) {
successfulOps.addVersion(keyToVersionMap.get(it.next()));
}
keyToVersionMap.clear();
}
if (partialKeys.hasFailure()) {
logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
if (op.isBridgeOperation()) {
if (partialKeys.getFailure() instanceof CancelException) {
throw (CancelException) partialKeys.getFailure();
} else {
throw new PutAllPartialResultException(partialKeys);
}
} else {
if (partialKeys.getFailure() instanceof RuntimeException) {
throw (RuntimeException) partialKeys.getFailure();
} else {
throw new RuntimeException(partialKeys.getFailure());
}
}
}
return -1;
}
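The tail of the method re-associates the version tags collected per bucket with the caller's original key order; a stripped-down sketch of just that step, reusing the names from the code above:
// keyToVersionMap was filled from each bucket reply via versions.saveVersions(...),
// while successfulOps preserves the caller's key order, so the versions are
// re-added in that order once all buckets have replied.
for (Object key : successfulOps.getKeys()) {
    successfulOps.addVersion(keyToVersionMap.get(key));
}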
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The class PutAll80, method cmdExecute.
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp) throws IOException, InterruptedException {
// copy this since we need to modify it
long start = startp;
Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
String regionName = null;
int numberOfKeys = 0;
Object key = null;
Part eventPart = null;
boolean replyWithMetaData = false;
VersionedObjectList response = null;
StringBuffer errMessage = new StringBuffer();
CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
CacheServerStats stats = serverConnection.getCacheServerStats();
// requiresResponse = true;
serverConnection.setAsTrue(REQUIRES_RESPONSE);
// new in 8.0
serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
{
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incReadPutAllRequestTime(start - oldStart);
}
try {
// Retrieve the data from the message parts
// part 0: region name
regionNamePart = clientMessage.getPart(0);
regionName = regionNamePart.getString();
if (regionName == null) {
String putAllMsg = LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
errMessage.append(putAllMsg);
writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
}
LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
if (region == null) {
String reason = " was not found during putAll request";
writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
}
final int BASE_PART_COUNT = getBasePartCount();
// part 1: eventID
eventPart = clientMessage.getPart(1);
ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
Breadcrumbs.setEventId(eventId);
// part 2: invoke callbacks (used by import)
Part callbacksPart = clientMessage.getPart(2);
boolean skipCallbacks = callbacksPart.getInt() == 1;
// part 3: flags
int flags = clientMessage.getPart(3).getInt();
boolean clientIsEmpty = (flags & PutAllOp.FLAG_EMPTY) != 0;
boolean clientHasCCEnabled = (flags & PutAllOp.FLAG_CONCURRENCY_CHECKS) != 0;
// part 4: number of keys
numberOfKeysPart = clientMessage.getPart(4);
numberOfKeys = numberOfKeysPart.getInt();
Object callbackArg = getOptionalCallbackArg(clientMessage);
if (logger.isDebugEnabled()) {
StringBuilder buffer = new StringBuilder();
buffer.append(serverConnection.getName()).append(": Received ").append(this.putAllClassName()).append(" request from ").append(serverConnection.getSocketString()).append(" for region ").append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ").append(numberOfKeys).append(" entries.");
logger.debug(buffer.toString());
}
// building the map
Map map = new LinkedHashMap();
Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
// Map isObjectMap = new LinkedHashMap();
for (int i = 0; i < numberOfKeys; i++) {
keyPart = clientMessage.getPart(BASE_PART_COUNT + i * 2);
key = keyPart.getStringOrObject();
if (key == null) {
String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
errMessage.append(putAllMsg);
writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
}
valuePart = clientMessage.getPart(BASE_PART_COUNT + i * 2 + 1);
if (valuePart.isNull()) {
String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
errMessage.append(putAllMsg);
writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
}
// byte[] value = valuePart.getSerializedForm();
Object value;
if (valuePart.isObject()) {
// skipCallbacks is used here as an import flag; if skipCallbacks ever becomes configurable, this code will need to be updated.
if (skipCallbacks && Token.INVALID.isSerializedValue(valuePart.getSerializedForm())) {
value = Token.INVALID;
} else {
value = CachedDeserializableFactory.create(valuePart.getSerializedForm());
}
} else {
value = valuePart.getSerializedForm();
}
// put the serialized form in the map for auth; it will be modified by the auth callback
if (clientMessage.isRetry()) {
// Construct the thread id/sequence id information for this element in the
// put all map
// The sequence id is constructed from the base sequence id and the offset
EventID entryEventId = new EventID(eventId, i);
// For PRs, the thread id is replaced with a fake thread id derived from the bucket id.
if (region instanceof PartitionedRegion) {
PartitionedRegion pr = (PartitionedRegion) region;
int bucketId = pr.getKeyInfo(key).getBucketId();
long entryThreadId = ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId, entryEventId.getSequenceID());
}
VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);
if (tag != null) {
retryVersions.put(key, tag);
}
// FIND THE VERSION TAG FOR THIS KEY - but how? all we have is the
// putAll eventId, not individual eventIds for entries, right?
}
map.put(key, value);
// isObjectMap.put(key, new Boolean(isObject));
}
if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {
// this means the optional timeout has been added
int timeout = clientMessage.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
serverConnection.setRequestSpecificTimeout(timeout);
}
this.securityService.authorizeRegionWrite(regionName);
AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
if (authzRequest != null) {
if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
authzRequest.createRegionAuthorize(regionName);
} else {
PutAllOperationContext putAllContext = authzRequest.putAllAuthorize(regionName, map, callbackArg);
map = putAllContext.getMap();
if (map instanceof UpdateOnlyMap) {
map = ((UpdateOnlyMap) map).getInternalMap();
}
callbackArg = putAllContext.getCallbackArg();
}
} else {
// no auth, so update the map based on isObjectMap here
/*
* Collection entries = map.entrySet(); Iterator iterator = entries.iterator(); Map.Entry
* mapEntry = null; while (iterator.hasNext()) { mapEntry = (Map.Entry)iterator.next();
* Object currkey = mapEntry.getKey(); byte[] serializedValue = (byte[])mapEntry.getValue();
* boolean isObject = ((Boolean)isObjectMap.get(currkey)).booleanValue(); if (isObject) {
* map.put(currkey, CachedDeserializableFactory.create(serializedValue)); } }
*/
}
response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId, skipCallbacks, callbackArg);
if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
// the client only needs the version list if concurrency checks are enabled on both sides and the client has storage
if (logger.isTraceEnabled()) {
logger.trace("setting response to null. region-cc-enabled={}; clientIsEmpty={}; client-cc-enabled={}", region.getConcurrencyChecksEnabled(), clientIsEmpty, clientHasCCEnabled);
}
response = null;
}
if (region instanceof PartitionedRegion) {
PartitionedRegion pr = (PartitionedRegion) region;
if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
pr.clearNetworkHopData();
replyWithMetaData = true;
}
}
} catch (RegionDestroyedException rde) {
writeChunkedException(clientMessage, rde, serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
} catch (ResourceException re) {
writeChunkedException(clientMessage, re, serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
} catch (PutAllPartialResultException pre) {
writeChunkedException(clientMessage, pre, serverConnection);
serverConnection.setAsTrue(RESPONDED);
return;
} catch (Exception ce) {
// If an interrupted exception is thrown , rethrow it
checkForInterrupt(serverConnection, ce);
// If an exception occurs during the put, preserve the connection
writeChunkedException(clientMessage, ce, serverConnection);
serverConnection.setAsTrue(RESPONDED);
logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION, serverConnection.getName()), ce);
return;
} finally {
long oldStart = start;
start = DistributionStats.getStatTime();
stats.incProcessPutAllTime(start - oldStart);
}
if (logger.isDebugEnabled()) {
logger.debug("{}: Sending {} response back to {} for regin {} {}", serverConnection.getName(), putAllClassName(), serverConnection.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
}
// Increment statistics and write the reply
if (!replyWithMetaData) {
writeReply(clientMessage, response, serverConnection);
}
serverConnection.setAsTrue(RESPONDED);
stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
}
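For retried putAlls, the loop above derives one event id per entry so that previously applied version tags can be looked up; a minimal sketch of that derivation, using only the calls visible in this method (bucketId, region, eventId, and the index i are stand-ins for the loop variables above):
// The per-entry event id is the putAll's base event id plus the entry offset i.
EventID entryEventId = new EventID(eventId, i);
// For partitioned regions, fold the bucket id into a fake thread id so the
// retried entry matches the id recorded by the original bulk operation.
long entryThreadId = ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId, entryEventId.getSequenceID());
VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);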