Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The writeReply method of the RemoveAll class:
protected void writeReply(Message origMsg, VersionedObjectList response, ServerConnection servConn)
    throws IOException {
  servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
  ChunkedMessage replyMsg = servConn.getChunkedResponseMessage();
  replyMsg.setMessageType(MessageType.RESPONSE);
  replyMsg.setTransactionId(origMsg.getTransactionId());
  int listSize = (response == null) ? 0 : response.size();
  if (response != null) {
    response.setKeys(null);
  }
  if (logger.isDebugEnabled()) {
    logger.debug("sending chunked response header. version list size={}{}", listSize,
        (logger.isTraceEnabled() ? " list=" + response : ""));
  }
  replyMsg.sendHeader();
  if (listSize > 0) {
    int chunkSize = 2 * MAXIMUM_CHUNK_SIZE;
    // Chunker will stream over the list in its toData method
    VersionedObjectList.Chunker chunk =
        new VersionedObjectList.Chunker(response, chunkSize, false, false);
    for (int i = 0; i < listSize; i += chunkSize) {
      boolean lastChunk = (i + chunkSize >= listSize);
      replyMsg.setNumberOfParts(1);
      replyMsg.setMessageType(MessageType.RESPONSE);
      replyMsg.setLastChunk(lastChunk);
      replyMsg.setTransactionId(origMsg.getTransactionId());
      replyMsg.addObjPart(chunk);
      if (logger.isDebugEnabled()) {
        logger.debug("sending chunk at index {} last chunk={} numParts={}", i, lastChunk,
            replyMsg.getNumberOfParts());
      }
      replyMsg.sendChunk(servConn);
    }
  } else {
    if (logger.isDebugEnabled()) {
      logger.debug("sending only header");
    }
    replyMsg.addObjPart(null);
    replyMsg.setLastChunk(true);
    replyMsg.sendChunk(servConn);
  }
  servConn.setAsTrue(RESPONDED);
  if (logger.isTraceEnabled()) {
    logger.trace("{}: rpl tx: {}", servConn.getName(), origMsg.getTransactionId());
  }
}
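The method above streams the version list back in fixed-size chunks and flags only the final chunk as the last one. Below is a minimal standalone sketch of that chunk-boundary test, in plain Java with no Geode types; the class and method names are invented for illustration.

import java.util.ArrayList;
import java.util.List;

public class ChunkBoundaryDemo {
  /** Returns one boolean per chunk; only the final entry is true. */
  static List<Boolean> lastChunkFlags(int listSize, int chunkSize) {
    List<Boolean> flags = new ArrayList<>();
    for (int i = 0; i < listSize; i += chunkSize) {
      // Same test as writeReply: the chunk starting at i is the last one
      // when it reaches or passes the end of the list.
      flags.add(i + chunkSize >= listSize);
    }
    return flags;
  }

  public static void main(String[] args) {
    // e.g. 5 entries sent in chunks of 2 -> [false, false, true]
    System.out.println(lastChunkFlags(5, 2));
  }
}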
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The cmdExecute method of the PutAll70 class:
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp)
    throws IOException, InterruptedException {
  // copy this since we need to modify it
  long start = startp;
  Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
  String regionName = null;
  int numberOfKeys = 0;
  Object key = null;
  Part eventPart = null;
  boolean replyWithMetaData = false;
  VersionedObjectList response = null;
  StringBuffer errMessage = new StringBuffer();
  CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
  CacheServerStats stats = serverConnection.getCacheServerStats();
  // requiresResponse = true;
  serverConnection.setAsTrue(REQUIRES_RESPONSE);
  {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incReadPutAllRequestTime(start - oldStart);
  }
  try {
    // Retrieve the data from the message parts
    // part 0: region name
    regionNamePart = clientMessage.getPart(0);
    regionName = regionNamePart.getString();
    if (regionName == null) {
      String putAllMsg =
          LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL
              .toLocalizedString();
      logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
      errMessage.append(putAllMsg);
      writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
          serverConnection);
      serverConnection.setAsTrue(RESPONDED);
      return;
    }
    LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
    if (region == null) {
      String reason = " was not found during put request";
      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
      serverConnection.setAsTrue(RESPONDED);
      return;
    }
    // part 1: eventID
    eventPart = clientMessage.getPart(1);
    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
    long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
    long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
    EventID eventId =
        new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
    // part 2: invoke callbacks (used by import)
    Part callbacksPart = clientMessage.getPart(2);
    boolean skipCallbacks = callbacksPart.getInt() == 1;
    // part 3: number of keys
    numberOfKeysPart = clientMessage.getPart(3);
    numberOfKeys = numberOfKeysPart.getInt();
    // building the map
    Map map = new LinkedHashMap();
    Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
    // Map isObjectMap = new LinkedHashMap();
    for (int i = 0; i < numberOfKeys; i++) {
      keyPart = clientMessage.getPart(4 + i * 2);
      key = keyPart.getStringOrObject();
      if (key == null) {
        String putAllMsg =
            LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL
                .toLocalizedString();
        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
        errMessage.append(putAllMsg);
        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
            serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
      }
      valuePart = clientMessage.getPart(4 + i * 2 + 1);
      if (valuePart.isNull()) {
        String putAllMsg =
            LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL
                .toLocalizedString();
        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
        errMessage.append(putAllMsg);
        writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(),
            serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
      }
      // byte[] value = valuePart.getSerializedForm();
      Object value;
      if (valuePart.isObject()) {
        // If skipCallbacks is made configurable, this code will need to be updated.
        if (skipCallbacks && Token.INVALID.isSerializedValue(valuePart.getSerializedForm())) {
          value = Token.INVALID;
        } else {
          value = CachedDeserializableFactory.create(valuePart.getSerializedForm());
        }
      } else {
        value = valuePart.getSerializedForm();
      }
      // Put the serialized form in the map for authorization; it may be modified by the auth
      // callback.
      if (clientMessage.isRetry()) {
        // Construct the thread id/sequence id information for this element in the putAll map.
        // The sequence id is constructed from the base sequence id and the offset.
        EventID entryEventId = new EventID(eventId, i);
        // For partitioned regions, a fake thread id derived from the bucket is assigned.
        if (region instanceof PartitionedRegion) {
          PartitionedRegion pr = (PartitionedRegion) region;
          int bucketId = pr.getKeyInfo(key).getBucketId();
          long entryThreadId =
              ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
          entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId,
              entryEventId.getSequenceID());
        }
        VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);
        if (tag != null) {
          retryVersions.put(key, tag);
        }
        // FIND THE VERSION TAG FOR THIS KEY - but how? all we have is the
        // putAll eventId, not individual eventIds for entries, right?
      }
      map.put(key, value);
      // isObjectMap.put(key, new Boolean(isObject));
    }
    if (clientMessage.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {
      // it means the optional timeout has been added
      int timeout = clientMessage.getPart(4 + 2 * numberOfKeys).getInt();
      serverConnection.setRequestSpecificTimeout(timeout);
    }
    this.securityService.authorizeRegionWrite(regionName);
    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
    if (authzRequest != null) {
      if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
        authzRequest.createRegionAuthorize(regionName);
      } else {
        PutAllOperationContext putAllContext =
            authzRequest.putAllAuthorize(regionName, map, null);
        map = putAllContext.getMap();
        if (map instanceof UpdateOnlyMap) {
          map = ((UpdateOnlyMap) map).getInternalMap();
        }
      }
    } else {
      // no auth, so update the map based on isObjectMap here
      /*
       * Collection entries = map.entrySet(); Iterator iterator = entries.iterator(); Map.Entry
       * mapEntry = null; while (iterator.hasNext()) { mapEntry = (Map.Entry)iterator.next();
       * Object currkey = mapEntry.getKey(); byte[] serializedValue = (byte[])mapEntry.getValue();
       * boolean isObject = ((Boolean)isObjectMap.get(currkey)).booleanValue(); if (isObject) {
       * map.put(currkey, CachedDeserializableFactory.create(serializedValue)); } }
       */
    }
    if (logger.isDebugEnabled()) {
      logger.debug("{}: Received putAll request ({} bytes) from {} for region {}",
          serverConnection.getName(), clientMessage.getPayloadLength(),
          serverConnection.getSocketString(), regionName);
    }
    response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(),
        eventId, skipCallbacks, null);
    if (!region.getConcurrencyChecksEnabled()) {
      // the client only needs this if versioning is being used
      response = null;
    }
    if (region instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) region;
      if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
        writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr,
            pr.getNetworkHopType());
        pr.clearNetworkHopData();
        replyWithMetaData = true;
      }
    }
  } catch (RegionDestroyedException rde) {
    writeException(clientMessage, rde, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (ResourceException re) {
    writeException(clientMessage, re, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (PutAllPartialResultException pre) {
    writeException(clientMessage, pre, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (Exception ce) {
    // If an interrupted exception is thrown, rethrow it
    checkForInterrupt(serverConnection, ce);
    // If an exception occurs during the put, preserve the connection
    writeException(clientMessage, ce, false, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    // if (logger.fineEnabled()) {
    logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION,
        serverConnection.getName()), ce);
    // }
    return;
  } finally {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incProcessPutAllTime(start - oldStart);
  }
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Sending putAll70 response back to {} for region {}: {}",
        serverConnection.getName(), serverConnection.getSocketString(), regionName, response);
  }
  // Starting in 7.0.1 we do not send the keys back
  if (response != null && Version.GFE_70.compareTo(serverConnection.getClientVersion()) < 0) {
    if (logger.isDebugEnabled()) {
      logger.debug("setting putAll keys to null");
    }
    response.setKeys(null);
  }
  // Increment statistics and write the reply
  if (!replyWithMetaData) {
    writeReply(clientMessage, response, serverConnection);
  }
  serverConnection.setAsTrue(RESPONDED);
  stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
}
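The part layout that cmdExecute decodes above is fixed: part 0 is the region name, part 1 the event id, part 2 the callbacks flag, part 3 the key count, then alternating key/value parts, with an optional trailing timeout part. A small sketch of that index arithmetic, in plain Java with no Geode Message/Part types; the class and method names are invented for illustration.

public class PutAll70PartLayout {
  static int keyPartIndex(int i)   { return 4 + i * 2; }     // key part for entry i
  static int valuePartIndex(int i) { return 4 + i * 2 + 1; } // value part for entry i

  /** Total part count when the optional timeout part is present. */
  static int partsWithTimeout(int numberOfKeys) { return 4 + 2 * numberOfKeys + 1; }

  public static void main(String[] args) {
    int numberOfKeys = 3;
    for (int i = 0; i < numberOfKeys; i++) {
      System.out.println("entry " + i + ": key part " + keyPartIndex(i)
          + ", value part " + valuePartIndex(i));
    }
    System.out.println("timeout present when message has "
        + partsWithTimeout(numberOfKeys) + " parts");
  }
}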
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The writeReplyWithRefreshMetadata method of the PutAll80 class:
private void writeReplyWithRefreshMetadata(Message origMsg, VersionedObjectList response,
    ServerConnection servConn, PartitionedRegion pr, byte nwHop) throws IOException {
  servConn.getCache().getCancelCriterion().checkCancelInProgress(null);
  ChunkedMessage replyMsg = servConn.getChunkedResponseMessage();
  replyMsg.setMessageType(MessageType.RESPONSE);
  replyMsg.setTransactionId(origMsg.getTransactionId());
  replyMsg.sendHeader();
  int listSize = (response == null) ? 0 : response.size();
  if (logger.isDebugEnabled()) {
    logger.debug("sending chunked response header with metadata refresh status. Version list size = {}{}",
        listSize, (logger.isTraceEnabled() ? "; list=" + response : ""));
  }
  if (response != null) {
    response.setKeys(null);
  }
  replyMsg.setNumberOfParts(1);
  replyMsg.setTransactionId(origMsg.getTransactionId());
  replyMsg.addBytesPart(new byte[] { pr.getMetadataVersion(), nwHop });
  if (listSize > 0) {
    replyMsg.setLastChunk(false);
    replyMsg.sendChunk(servConn);
    // MAXIMUM_CHUNK_SIZE
    int chunkSize = 2 * MAXIMUM_CHUNK_SIZE;
    // Chunker will stream over the list in its toData method
    VersionedObjectList.Chunker chunk =
        new VersionedObjectList.Chunker(response, chunkSize, false, false);
    for (int i = 0; i < listSize; i += chunkSize) {
      boolean lastChunk = (i + chunkSize >= listSize);
      // resets the message
      replyMsg.setNumberOfParts(1);
      replyMsg.setMessageType(MessageType.RESPONSE);
      replyMsg.setLastChunk(lastChunk);
      replyMsg.setTransactionId(origMsg.getTransactionId());
      replyMsg.addObjPart(chunk);
      if (logger.isDebugEnabled()) {
        logger.debug("sending chunk at index {} last chunk={} numParts={}", i, lastChunk,
            replyMsg.getNumberOfParts());
      }
      replyMsg.sendChunk(servConn);
    }
  } else {
    replyMsg.setLastChunk(true);
    if (logger.isDebugEnabled()) {
      logger.debug("sending first and only part of chunked message");
    }
    replyMsg.sendChunk(servConn);
  }
  pr.getPrStats().incPRMetaDataSentCount();
  if (logger.isTraceEnabled()) {
    logger.trace("{}: rpl with REFRESH_METADATA tx: {}", servConn.getName(),
        origMsg.getTransactionId());
  }
}
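Unlike the plain writeReply, this variant first sends a chunk carrying the two metadata-refresh bytes { metadataVersion, networkHop } and only then streams the version-list chunks. The sketch below just prints that chunk plan; it is plain Java with invented names and made-up example sizes, not the Geode API.

import java.util.ArrayList;
import java.util.List;

public class RefreshMetadataReplyPlan {
  static List<String> plan(int listSize, int chunkSize) {
    List<String> steps = new ArrayList<>();
    steps.add("header");
    if (listSize > 0) {
      // metadata chunk goes first and is not the last chunk when list data follows
      steps.add("chunk[metadataVersion, networkHop] lastChunk=false");
      for (int i = 0; i < listSize; i += chunkSize) {
        boolean last = i + chunkSize >= listSize;
        steps.add("chunk[list @" + i + "] lastChunk=" + last);
      }
    } else {
      // empty list: the metadata chunk is the only chunk
      steps.add("chunk[metadataVersion, networkHop] lastChunk=true");
    }
    return steps;
  }

  public static void main(String[] args) {
    plan(5, 2).forEach(System.out::println);
  }
}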
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The postRemoveAll method of the PartitionedTXRegionStub class:
@Override
public void postRemoveAll(DistributedRemoveAllOperation op, VersionedObjectList successfulOps,
    LocalRegion r) {
  if (r.getCache().isCacheAtShutdownAll()) {
    throw new CacheClosedException("Cache is shutting down");
  }
  PartitionedRegion pr = (PartitionedRegion) r;
  final long startTime = PartitionedRegionStats.startTime();
  // build all the msgs by bucketid
  HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
  PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
  // this is rebuilt by this method
  successfulOps.clear();
  Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
  while (itor.hasNext()) {
    Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
    Integer bucketId = mapEntry.getKey();
    RemoveAllPRMessage prMsg = mapEntry.getValue();
    pr.checkReadiness();
    try {
      VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg, pr);
      // prMsg.saveKeySet(partialKeys);
      partialKeys.addKeysAndVersions(versions);
      successfulOps.addAll(versions);
    } catch (PutAllPartialResultException pre) {
      // sendMsgByBucket applied partial keys
      partialKeys.consolidate(pre.getResult());
    } catch (Exception ex) {
      // failed with some other exception; record the first key as the failed key
      @Released
      EntryEventImpl firstEvent = prMsg.getFirstEvent(pr);
      try {
        partialKeys.saveFailedKey(firstEvent.getKey(), ex);
      } finally {
        firstEvent.release();
      }
    }
  }
  pr.prStats.endRemoveAll(startTime);
  if (partialKeys.hasFailure()) {
    pr.getCache().getLoggerI18n().info(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_0_1,
        new Object[] { pr.getFullPath(), partialKeys });
    if (op.isBridgeOperation()) {
      if (partialKeys.getFailure() instanceof CancelException) {
        throw (CancelException) partialKeys.getFailure();
      } else {
        throw new PutAllPartialResultException(partialKeys);
      }
    } else {
      if (partialKeys.getFailure() instanceof RuntimeException) {
        throw (RuntimeException) partialKeys.getFailure();
      } else {
        throw new RuntimeException(partialKeys.getFailure());
      }
    }
  }
}
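postRemoveAll fans the operation out per bucket, keeps whatever succeeded in successfulOps, records failures, and only rethrows after every bucket has been attempted. A simplified sketch of that accumulate-then-rethrow pattern follows; it is plain Java with invented names (PerBucketBulkOpDemo, applyPerBucket) and collapses Geode's PutAllPartialResult handling into a single remembered exception.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class PerBucketBulkOpDemo {
  static List<String> applyPerBucket(Map<Integer, String> msgsByBucket,
      Function<String, List<String>> sendMsgByBucket) {
    List<String> successfulOps = new ArrayList<>();
    Exception firstFailure = null;
    for (Map.Entry<Integer, String> e : msgsByBucket.entrySet()) {
      try {
        // keep whatever this bucket managed to apply
        successfulOps.addAll(sendMsgByBucket.apply(e.getValue()));
      } catch (RuntimeException ex) {
        if (firstFailure == null) {
          firstFailure = ex; // remember the failure but keep processing the other buckets
        }
      }
    }
    if (firstFailure != null) {
      // only after all buckets were attempted is the failure surfaced
      throw new RuntimeException("partial removeAll failure", firstFailure);
    }
    return successfulOps;
  }

  public static void main(String[] args) {
    Map<Integer, String> msgs = new LinkedHashMap<>();
    msgs.put(0, "bucket-0");
    msgs.put(1, "bucket-1");
    System.out.println(applyPerBucket(msgs, m -> List.of(m + ":v1")));
  }
}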
Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.
The basicBridgePutAll method of the LocalRegion class:
/**
 * Called on a bridge server when it has received a putAll command from a client.
 *
 * @param map a map of key->value for the entries we are putting
 * @param retryVersions a map of key->version tag. If any of the entries are the result of a
 *        retried client event, we need to make sure we send the original version tag along with
 *        the event.
 * @param callbackArg callback argument from client
 */
public VersionedObjectList basicBridgePutAll(Map map, Map<Object, VersionTag> retryVersions,
    ClientProxyMembershipID memberId, EventID eventId, boolean skipCallbacks, Object callbackArg)
    throws TimeoutException, CacheWriterException {
  long startPut = CachePerfStats.getStatTime();
  if (isGatewaySenderEnabled()) {
    callbackArg = new GatewaySenderEventCallbackArgument(callbackArg);
  }
  @Released
  final EntryEventImpl event = EntryEventImpl.create(this, Operation.PUTALL_CREATE, null,
      null /* new value */, callbackArg, false /* origin remote */,
      memberId.getDistributedMember(), !skipCallbacks /* generateCallbacks */, eventId);
  try {
    event.setContext(memberId);
    DistributedPutAllOperation putAllOp = new DistributedPutAllOperation(event, map.size(), true);
    try {
      VersionedObjectList result = basicPutAll(map, putAllOp, retryVersions);
      getCachePerfStats().endPutAll(startPut);
      return result;
    } finally {
      putAllOp.freeOffHeapResources();
    }
  } finally {
    event.release();
  }
}
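basicBridgePutAll uses nested try/finally blocks so the putAll operation's off-heap resources and the entry event are released even when basicPutAll throws. A minimal sketch of that release ordering, in plain Java; Releasable and the other names are invented stand-ins for the Geode types, not the real API.

public class NestedReleaseDemo {
  interface Releasable { void release(); }

  static String doBulkOp(Releasable event, Releasable bulkOp) {
    try {
      try {
        return "result"; // stands in for basicPutAll(map, putAllOp, retryVersions)
      } finally {
        bulkOp.release(); // always free the operation's resources first
      }
    } finally {
      event.release(); // then release the event itself
    }
  }

  public static void main(String[] args) {
    // prints the release order ("putAllOp released", then "event released"), then "result"
    System.out.println(doBulkOp(() -> System.out.println("event released"),
        () -> System.out.println("putAllOp released")));
  }
}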