Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class AbstractRegionMap, method processAndGenerateTXVersionTag:
/**
 * Called from txApply* methods to process and generate versionTags.
 */
private void processAndGenerateTXVersionTag(final LocalRegion owner, EntryEventImpl cbEvent, RegionEntry re, TXEntryState txEntryState) {
  if (shouldPerformConcurrencyChecks(owner, cbEvent)) {
    try {
      if (txEntryState != null && txEntryState.getRemoteVersionTag() != null) {
        // to generate a version based on a remote VersionTag, we will
        // have to put the remote versionTag in the regionEntry
        VersionTag remoteTag = txEntryState.getRemoteVersionTag();
        if (re instanceof VersionStamp) {
          VersionStamp stamp = (VersionStamp) re;
          stamp.setVersions(remoteTag);
        }
      }
      processVersionTag(re, cbEvent);
    } catch (ConcurrentCacheModificationException ignore) {
      // ignore this exception, but still invoke callbacks for this operation
    }
    // just apply it and do not regenerate it in phase-2 commit
    if (cbEvent != null && txEntryState != null && txEntryState.getDistTxEntryStates() != null) {
      cbEvent.setNextRegionVersion(txEntryState.getDistTxEntryStates().getRegionVersion());
    }
    // cbEvent.setNextRegionVersion(txEntryState.getNextRegionVersion());
    owner.generateAndSetVersionTag(cbEvent, re);
  }
}
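The key idea above is that a transaction's version can be generated on top of a remote member's tag: the remote tag is first written into the entry's VersionStamp, a concurrent-modification conflict is tolerated, and only then is a new tag generated. Below is a minimal, self-contained sketch of that flow; every type in it is a hypothetical stand-in, not a Geode class.

  // All types below are hypothetical stand-ins, not Geode classes.
  final class VersionTagSketch {
    int entryVersion;
  }

  class ConcurrentModificationConflict extends RuntimeException {
  }

  interface VersionedEntry {
    void setVersions(VersionTagSketch remoteTag); // analogous to VersionStamp.setVersions

    VersionTagSketch generateNextTag();           // analogous to owner.generateAndSetVersionTag
  }

  final class TxVersionFlow {
    /** Seed with the remote tag when present, tolerate a conflict, then generate locally. */
    static VersionTagSketch process(VersionedEntry entry, VersionTagSketch remoteTag) {
      try {
        if (remoteTag != null) {
          // seeding lets the locally generated version build on the remote one
          entry.setVersions(remoteTag);
        }
      } catch (ConcurrentModificationConflict ignore) {
        // swallow the conflict so callbacks for the operation can still be invoked
      }
      return entry.generateNextTag();
    }
  }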
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class DistributedCacheOperation, method initMessage:
protected void initMessage(CacheOperationMessage msg, DirectReplyProcessor p) {
  msg.regionPath = getRegion().getFullPath();
  msg.processorId = p == null ? 0 : p.getProcessorId();
  msg.processor = p;
  if (this.event.getOperation().isEntry()) {
    EntryEventImpl entryEvent = getEvent();
    msg.callbackArg = entryEvent.getRawCallbackArgument();
    msg.possibleDuplicate = entryEvent.isPossibleDuplicate();
    VersionTag tag = entryEvent.getVersionTag();
    msg.setInhibitNotificationsBit(entryEvent.inhibitAllNotifications());
    if (tag != null && tag.hasValidVersion()) {
      msg.setVersionTag(tag);
    }
  } else {
    msg.callbackArg = ((RegionEventImpl) this.event).getRawCallbackArgument();
  }
  msg.op = this.event.getOperation();
  msg.owner = this;
  msg.regionAllowsConflation = getRegion().getEnableAsyncConflation();
}
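Note that the tag is only forwarded when it exists and hasValidVersion() returns true; otherwise the outgoing message carries no version information. A small hypothetical sketch of that guard, using stand-in types rather than the real CacheOperationMessage:

  // Hypothetical sketch; Tag and Msg are illustrative stand-ins, not Geode types.
  final class MessageTagging {
    interface Tag {
      boolean hasValidVersion();
    }

    static final class Msg {
      Tag versionTag;
    }

    /** Attach a version tag to the outgoing message only when it is present and valid. */
    static void attachTagIfValid(Msg msg, Tag tag) {
      if (tag != null && tag.hasValidVersion()) {
        msg.versionTag = tag;
      }
      // otherwise the message carries no tag and the receiver handles versioning itself
    }
  }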
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class DistributedRegion, method fetchRemoteVersionTag:
/**
 * Fetch the version tag for the given key from a remote replicate member.
 *
 * @throws EntryNotFoundException if the entry is not found on the replicate member
 * @return the VersionTag for the key
 */
protected VersionTag fetchRemoteVersionTag(Object key) {
  VersionTag tag = null;
  assert this.dataPolicy != DataPolicy.REPLICATE;
  final TXStateProxy tx = cache.getTXMgr().internalSuspend();
  try {
    boolean retry = true;
    InternalDistributedMember member = getRandomReplicate();
    while (retry) {
      try {
        if (member == null) {
          break;
        }
        FetchVersionResponse response = RemoteFetchVersionMessage.send(member, this, key);
        tag = response.waitForResponse();
        retry = false;
      } catch (RemoteOperationException e) {
        member = getRandomReplicate();
        if (member != null) {
          if (logger.isDebugEnabled()) {
            logger.debug("Retrying RemoteFetchVersionMessage on member:{}", member);
          }
        }
      }
    }
  } finally {
    if (tx != null) {
      cache.getTXMgr().internalResume(tx);
    }
  }
  return tag;
}
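The method suspends any in-progress transaction, then loops over randomly chosen replicates until one answers or none remain. A hypothetical, Geode-free sketch of that retry shape (member selection and the remote call are stand-ins):

  import java.util.function.Supplier;

  // Hypothetical sketch; RemoteCall and the member-selection supplier are stand-ins.
  final class RemoteFetchRetry {
    interface RemoteCall<T> {
      T fetch(String member) throws Exception;
    }

    /** Try a random replicate; on failure pick another one until none is left. */
    static <T> T fetchWithRetry(Supplier<String> randomReplicate, RemoteCall<T> call) {
      String member = randomReplicate.get();
      while (member != null) {
        try {
          return call.fetch(member);
        } catch (Exception e) {
          // this member failed or left; fall back to another replicate, or give up
          member = randomReplicate.get();
        }
      }
      return null; // no replicate could answer
    }
  }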
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class PartitionedRegion, method postRemoveAllSend:
@Override
public long postRemoveAllSend(DistributedRemoveAllOperation op, VersionedObjectList successfulOps) {
  final boolean isDebugEnabled = logger.isDebugEnabled();
  if (cache.isCacheAtShutdownAll()) {
    throw new CacheClosedException("Cache is shutting down");
  }
  final long startTime = PartitionedRegionStats.startTime();
  // build all the msgs by bucketid
  HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
  PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
  // clear the successfulOps list since we're actually doing the removes here
  // and the basicRemoveAll work was just a way to build the "op" object
  Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulOps.size());
  successfulOps.clearVersions();
  Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
  while (itor.hasNext()) {
    Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
    Integer bucketId = (Integer) mapEntry.getKey();
    RemoveAllPRMessage prMsg = mapEntry.getValue();
    checkReadiness();
    long then = 0;
    if (isDebugEnabled) {
      then = System.currentTimeMillis();
    }
    try {
      VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
      if (versions.size() > 0) {
        partialKeys.addKeysAndVersions(versions);
        versions.saveVersions(keyToVersionMap);
      } else if (!this.concurrencyChecksEnabled) {
        // no keys returned if not versioned
        Set keys = prMsg.getKeys();
        partialKeys.addKeys(keys);
      }
    } catch (PutAllPartialResultException pre) {
      // sendMsgByBucket applied partial keys
      if (isDebugEnabled) {
        logger.debug("PR.postRemoveAll encountered BulkOpPartialResultException, ", pre);
      }
      partialKeys.consolidate(pre.getResult());
    } catch (Exception ex) {
      // failed with some other exception
      if (isDebugEnabled) {
        logger.debug("PR.postRemoveAll encountered exception at sendMsgByBucket, ", ex);
      }
      @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
      try {
        partialKeys.saveFailedKey(firstEvent.getKey(), ex);
      } finally {
        firstEvent.release();
      }
    }
    if (isDebugEnabled) {
      long now = System.currentTimeMillis();
      if ((now - then) >= 10000) {
        logger.debug("PR.sendMsgByBucket took {} ms", (now - then));
      }
    }
  }
  this.prStats.endRemoveAll(startTime);
  if (!keyToVersionMap.isEmpty()) {
    for (Iterator it = successfulOps.getKeys().iterator(); it.hasNext();) {
      successfulOps.addVersion(keyToVersionMap.get(it.next()));
    }
    keyToVersionMap.clear();
  }
  if (partialKeys.hasFailure()) {
    logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
    if (op.isBridgeOperation()) {
      if (partialKeys.getFailure() instanceof CancelException) {
        throw (CancelException) partialKeys.getFailure();
      } else {
        throw new PutAllPartialResultException(partialKeys);
      }
    } else {
      if (partialKeys.getFailure() instanceof RuntimeException) {
        throw (RuntimeException) partialKeys.getFailure();
      } else {
        throw new RuntimeException(partialKeys.getFailure());
      }
    }
  }
  return -1;
}
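Because results come back per bucket, the versions are first collected into keyToVersionMap and only afterwards re-attached to successfulOps in the original key order. A hypothetical sketch of that re-association step, independent of the Geode types:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Map;

  // Hypothetical sketch of the re-association step: versions arrive grouped by bucket,
  // but the reply must list them in the caller's original key order.
  final class VersionReassociation {
    static <K, V> List<V> inKeyOrder(List<K> successfulKeys, Map<K, V> keyToVersion) {
      List<V> ordered = new ArrayList<>(successfulKeys.size());
      for (K key : successfulKeys) {
        // a key that produced no version (e.g. concurrency checks disabled) maps to null
        ordered.add(keyToVersion.get(key));
      }
      return ordered;
    }
  }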
Use of org.apache.geode.internal.cache.versions.VersionTag in project geode by apache.
The class PutAll80, method cmdExecute:
@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp) throws IOException, InterruptedException {
  // copy this since we need to modify it
  long start = startp;
  Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
  String regionName = null;
  int numberOfKeys = 0;
  Object key = null;
  Part eventPart = null;
  boolean replyWithMetaData = false;
  VersionedObjectList response = null;
  StringBuffer errMessage = new StringBuffer();
  CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
  CacheServerStats stats = serverConnection.getCacheServerStats();
  // requiresResponse = true;
  serverConnection.setAsTrue(REQUIRES_RESPONSE);
  // new in 8.0
  serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
  {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incReadPutAllRequestTime(start - oldStart);
  }
  try {
    // Retrieve the data from the message parts
    // part 0: region name
    regionNamePart = clientMessage.getPart(0);
    regionName = regionNamePart.getString();
    if (regionName == null) {
      String putAllMsg = LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
      logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
      errMessage.append(putAllMsg);
      writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
      serverConnection.setAsTrue(RESPONDED);
      return;
    }
    LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
    if (region == null) {
      String reason = " was not found during putAll request";
      writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
      serverConnection.setAsTrue(RESPONDED);
      return;
    }
    final int BASE_PART_COUNT = getBasePartCount();
    // part 1: eventID
    eventPart = clientMessage.getPart(1);
    ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
    long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
    long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
    EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
    Breadcrumbs.setEventId(eventId);
    // part 2: invoke callbacks (used by import)
    Part callbacksPart = clientMessage.getPart(2);
    boolean skipCallbacks = callbacksPart.getInt() == 1;
    // part 3: flags
    int flags = clientMessage.getPart(3).getInt();
    boolean clientIsEmpty = (flags & PutAllOp.FLAG_EMPTY) != 0;
    boolean clientHasCCEnabled = (flags & PutAllOp.FLAG_CONCURRENCY_CHECKS) != 0;
    // part 4: number of keys
    numberOfKeysPart = clientMessage.getPart(4);
    numberOfKeys = numberOfKeysPart.getInt();
    Object callbackArg = getOptionalCallbackArg(clientMessage);
    if (logger.isDebugEnabled()) {
      StringBuilder buffer = new StringBuilder();
      buffer.append(serverConnection.getName()).append(": Received ").append(this.putAllClassName()).append(" request from ").append(serverConnection.getSocketString()).append(" for region ").append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ").append(numberOfKeys).append(" entries.");
      logger.debug(buffer.toString());
    }
    // building the map
    Map map = new LinkedHashMap();
    Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
    // Map isObjectMap = new LinkedHashMap();
    for (int i = 0; i < numberOfKeys; i++) {
      keyPart = clientMessage.getPart(BASE_PART_COUNT + i * 2);
      key = keyPart.getStringOrObject();
      if (key == null) {
        String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
        errMessage.append(putAllMsg);
        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
      }
      valuePart = clientMessage.getPart(BASE_PART_COUNT + i * 2 + 1);
      if (valuePart.isNull()) {
        String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
        logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
        errMessage.append(putAllMsg);
        writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
      }
      // byte[] value = valuePart.getSerializedForm();
      Object value;
      if (valuePart.isObject()) {
        // if skipCallbacks ever becomes configurable, this code will need to be updated
        if (skipCallbacks && Token.INVALID.isSerializedValue(valuePart.getSerializedForm())) {
          value = Token.INVALID;
        } else {
          value = CachedDeserializableFactory.create(valuePart.getSerializedForm());
        }
      } else {
        value = valuePart.getSerializedForm();
      }
      // put the serialized form for auth; it may be modified by the auth callback
      if (clientMessage.isRetry()) {
        // Construct the thread id/sequence id information for this element in the putAll map.
        // The sequence id is constructed from the base sequence id and the offset.
        EventID entryEventId = new EventID(eventId, i);
        // For PRs, a fake thread id is assigned.
        if (region instanceof PartitionedRegion) {
          PartitionedRegion pr = (PartitionedRegion) region;
          int bucketId = pr.getKeyInfo(key).getBucketId();
          long entryThreadId = ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
          entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId, entryEventId.getSequenceID());
        }
        VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);
        if (tag != null) {
          retryVersions.put(key, tag);
        }
        // FIND THE VERSION TAG FOR THIS KEY - but how? all we have is the
        // putAll eventId, not individual eventIds for entries, right?
      }
      map.put(key, value);
      // isObjectMap.put(key, new Boolean(isObject));
    }
    if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {
      // it means the optional timeout has been added
      int timeout = clientMessage.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
      serverConnection.setRequestSpecificTimeout(timeout);
    }
    this.securityService.authorizeRegionWrite(regionName);
    AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
    if (authzRequest != null) {
      if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
        authzRequest.createRegionAuthorize(regionName);
      } else {
        PutAllOperationContext putAllContext = authzRequest.putAllAuthorize(regionName, map, callbackArg);
        map = putAllContext.getMap();
        if (map instanceof UpdateOnlyMap) {
          map = ((UpdateOnlyMap) map).getInternalMap();
        }
        callbackArg = putAllContext.getCallbackArg();
      }
    } else {
      // no auth, so update the map based on isObjectMap here
      /*
       * Collection entries = map.entrySet(); Iterator iterator = entries.iterator(); Map.Entry
       * mapEntry = null; while (iterator.hasNext()) { mapEntry = (Map.Entry)iterator.next();
       * Object currkey = mapEntry.getKey(); byte[] serializedValue = (byte[])mapEntry.getValue();
       * boolean isObject = ((Boolean)isObjectMap.get(currkey)).booleanValue(); if (isObject) {
       * map.put(currkey, CachedDeserializableFactory.create(serializedValue)); } }
       */
    }
    response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId, skipCallbacks, callbackArg);
    if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
      // the client only needs the version response if concurrency checks are in use and it has storage
      if (logger.isTraceEnabled()) {
        logger.trace("setting response to null. region-cc-enabled={}; clientIsEmpty={}; client-cc-enabled={}", region.getConcurrencyChecksEnabled(), clientIsEmpty, clientHasCCEnabled);
      }
      response = null;
    }
    if (region instanceof PartitionedRegion) {
      PartitionedRegion pr = (PartitionedRegion) region;
      if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
        writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
        pr.clearNetworkHopData();
        replyWithMetaData = true;
      }
    }
  } catch (RegionDestroyedException rde) {
    writeChunkedException(clientMessage, rde, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (ResourceException re) {
    writeChunkedException(clientMessage, re, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (PutAllPartialResultException pre) {
    writeChunkedException(clientMessage, pre, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    return;
  } catch (Exception ce) {
    // If an interrupted exception is thrown, rethrow it
    checkForInterrupt(serverConnection, ce);
    // If an exception occurs during the put, preserve the connection
    writeChunkedException(clientMessage, ce, serverConnection);
    serverConnection.setAsTrue(RESPONDED);
    logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION, serverConnection.getName()), ce);
    return;
  } finally {
    long oldStart = start;
    start = DistributionStats.getStatTime();
    stats.incProcessPutAllTime(start - oldStart);
  }
  if (logger.isDebugEnabled()) {
    logger.debug("{}: Sending {} response back to {} for region {} {}", serverConnection.getName(), putAllClassName(), serverConnection.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
  }
  // Increment statistics and write the reply
  if (!replyWithMetaData) {
    writeReply(clientMessage, response, serverConnection);
  }
  serverConnection.setAsTrue(RESPONDED);
  stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
}
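For a retried putAll, each entry gets its own EventID derived from the base event id plus the entry offset (with a fake thread id encoding the bucket for partitioned regions), and any version tag recorded under that id is reused instead of generating a new version. A hypothetical sketch of collecting such retry versions; the tag lookup function is a stand-in for findVersionTagsForRetriedBulkOp:

  import java.util.LinkedHashMap;
  import java.util.Map;
  import java.util.function.Function;

  // Hypothetical sketch; the tag lookup is a stand-in keyed by (base event id, entry offset).
  final class RetryVersionLookup {
    static <K, T> Map<K, T> collectRetryVersions(Iterable<K> keys, Function<Integer, T> tagForEntryOffset) {
      Map<K, T> retryVersions = new LinkedHashMap<>();
      int offset = 0;
      for (K key : keys) {
        T tag = tagForEntryOffset.apply(offset++); // per-entry id = base id + offset
        if (tag != null) {
          retryVersions.put(key, tag); // reuse the recorded version rather than generating a new one
        }
      }
      return retryVersions;
    }
  }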