Example 1 with VersionedObjectList

Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.

From the class PutAllOp, the method execute:

/**
   * Does a region putAll on a server using connections from the given pool to communicate with the
   * server.
   * 
   * @param pool the pool to use to communicate with the server.
   * @param region the region to do the putAll on
   * @param map the Map of keys and values to put
   * @param eventId the event id for this putAll
   */
public static VersionedObjectList execute(ExecutablePool pool, Region region, Map map, EventID eventId, boolean skipCallbacks, int retryAttempts, Object callbackArg) {
    ClientMetadataService cms = ((LocalRegion) region).getCache().getClientMetadataService();
    Map<ServerLocation, HashSet> serverToFilterMap = cms.getServerToFilterMap(map.keySet(), region, true);
    if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
        AbstractOp op = new PutAllOpImpl(region, map, eventId, ((PoolImpl) pool).getPRSingleHopEnabled(), skipCallbacks, callbackArg);
        op.initMessagePart();
        return (VersionedObjectList) pool.execute(op);
    }
    List callableTasks = constructAndGetPutAllTasks(region, map, eventId, skipCallbacks, serverToFilterMap, (PoolImpl) pool, callbackArg);
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (isDebugEnabled) {
        logger.debug("PutAllOp#execute : Number of putAll tasks is : {}", callableTasks.size());
    }
    HashMap<ServerLocation, RuntimeException> failedServers = new HashMap<ServerLocation, RuntimeException>();
    PutAllPartialResult result = new PutAllPartialResult(map.size());
    try {
        Map<ServerLocation, Object> results = SingleHopClientExecutor.submitBulkOp(callableTasks, cms, (LocalRegion) region, failedServers);
        for (Map.Entry<ServerLocation, Object> entry : results.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof PutAllPartialResultException) {
                PutAllPartialResultException pap = (PutAllPartialResultException) value;
                if (isDebugEnabled) {
                    logger.debug("PutAll SingleHop encountered PutAllPartialResultException exception: {}, failedServers are {}", pap, failedServers.keySet());
                }
                result.consolidate(pap.getResult());
            } else {
                if (value != null) {
                    VersionedObjectList list = (VersionedObjectList) value;
                    result.addKeysAndVersions(list);
                }
            }
        }
    } catch (RuntimeException ex) {
        if (isDebugEnabled) {
            logger.debug("single-hop putAll encountered unexpected exception: ", ex);
        }
        throw ex;
    }
    if (!failedServers.isEmpty()) {
        if (retryAttempts == 0) {
            throw failedServers.values().iterator().next();
        }
        // add them to the partial result set
        if (result.getSucceededKeysAndVersions().size() == 0) {
            // if there're failed servers, we need to save the succeed keys in submitPutAll
            // if retry succeeded, everything is ok, otherwise, the saved "succeeded
            // keys" should be consolidated into PutAllPartialResultException
            // succeedKeySet is used to send back to client in PartialResult case
            // so it's not a must to use LinkedHashSet
            Set succeedKeySet = new LinkedHashSet();
            Set<ServerLocation> serverSet = serverToFilterMap.keySet();
            for (ServerLocation server : serverSet) {
                if (!failedServers.containsKey(server)) {
                    succeedKeySet.addAll(serverToFilterMap.get(server));
                }
            }
            // save succeedKeys, but if retries all succeeded, discard the PutAllPartialResult
            result.addKeys(succeedKeySet);
        }
        // send maps for the failed servers one by one instead of merging
        // them into one big map. The reason is, we have to keep the same event
        // ids for each sub map. There is a unit test in PutAllCSDUnitTest for
        // the otherwise case.
        boolean oneSubMapRetryFailed = false;
        Set<ServerLocation> failedServerSet = failedServers.keySet();
        for (ServerLocation failedServer : failedServerSet) {
            // Throwable failedServers.values().iterator().next();
            RuntimeException savedRTE = failedServers.get(failedServer);
            if (savedRTE instanceof PutAllPartialResultException) {
                // will not retry for PutAllPartialResultException
                // but it means at least one sub map ever failed
                oneSubMapRetryFailed = true;
                continue;
            }
            Map newMap = new LinkedHashMap();
            Set keySet = serverToFilterMap.get(failedServer);
            for (Object key : keySet) {
                newMap.put(key, map.get(key));
            }
            try {
                VersionedObjectList v = PutAllOp.execute(pool, region, newMap, eventId, skipCallbacks, true, callbackArg);
                if (v == null) {
                    result.addKeys(keySet);
                } else {
                    result.addKeysAndVersions(v);
                }
            } catch (PutAllPartialResultException pre) {
                oneSubMapRetryFailed = true;
                if (logger.isDebugEnabled()) {
                    logger.debug("Retry failed with PutAllPartialResultException: {} Before retry: {}", pre, result.getKeyListString());
                }
                result.consolidate(pre.getResult());
            } catch (Exception rte) {
                oneSubMapRetryFailed = true;
                Object firstKey = newMap.keySet().iterator().next();
                result.saveFailedKey(firstKey, rte);
            }
        }
        // If all retries succeeded, the PRE in first tries can be ignored
        if (oneSubMapRetryFailed && result.hasFailure()) {
            PutAllPartialResultException pre = new PutAllPartialResultException(result);
            throw pre;
        }
    }
    return result.getSucceededKeysAndVersions();
}
Also used : LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet) Set(java.util.Set) ServerLocation(org.apache.geode.distributed.internal.ServerLocation) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ArrayList(java.util.ArrayList) List(java.util.List) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) CacheClosedException(org.apache.geode.cache.CacheClosedException) ServerOperationException(org.apache.geode.cache.client.ServerOperationException)
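
Below is a minimal caller-side sketch, not taken from the Geode source, of how the execute method in Example 1 might be driven and how its VersionedObjectList result and a PutAllPartialResultException could be consumed. The wrapper class and method names are hypothetical, and the org.apache.geode.cache.client.internal package for PutAllOp and ExecutablePool is assumed; only VersionedObjectList and PutAllPartialResult accessors that appear in the snippet are used.

// Hypothetical helper sketched against the execute(...) signature shown above.
import java.util.Map;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.internal.ExecutablePool; // package assumed
import org.apache.geode.cache.client.internal.PutAllOp; // package assumed
import org.apache.geode.internal.cache.EventID;
import org.apache.geode.internal.cache.PutAllPartialResultException;
import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList;

public class PutAllOpUsageSketch {

    static void putAllAndReport(ExecutablePool pool, Region region, Map map, EventID eventId,
            Object callbackArg) {
        try {
            // retryAttempts > 0 lets execute() re-drive failed single-hop sub-maps itself
            VersionedObjectList versions = PutAllOp.execute(pool, region, map, eventId,
                    false /* skipCallbacks */, 1 /* retryAttempts */, callbackArg);
            // execute() may return null when the region is not versioned
            int versioned = (versions == null) ? 0 : versions.size();
            System.out.println("putAll applied " + map.size() + " entries, " + versioned
                    + " with version tags");
        } catch (PutAllPartialResultException pre) {
            // Some sub-maps failed even after the retry pass; the partial result still
            // carries the keys (and version tags) that did reach the servers.
            VersionedObjectList succeeded = pre.getResult().getSucceededKeysAndVersions();
            System.out.println("putAll partially failed: " + succeeded.size()
                    + " entries applied, first failure: " + pre.getResult().getFailure());
        }
    }
}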

Example 2 with VersionedObjectList

Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.

From the class GetAllOp, the method execute:

public static VersionedObjectList execute(ExecutablePool pool, Region region, List keys, int retryAttempts, Object callback) {
    AbstractOp op = new GetAllOpImpl(region.getFullPath(), keys, callback);
    ClientMetadataService cms = ((LocalRegion) region).getCache().getClientMetadataService();
    Map<ServerLocation, HashSet> serverToFilterMap = cms.getServerToFilterMap(keys, region, true);
    if (serverToFilterMap == null || serverToFilterMap.isEmpty()) {
        op.initMessagePart();
        return ((VersionedObjectList) pool.execute(op)).setKeys(keys);
    } else {
        VersionedObjectList result = null;
        ServerConnectivityException se = null;
        List retryList = new ArrayList();
        List callableTasks = constructGetAllTasks(region.getFullPath(), serverToFilterMap, (PoolImpl) pool, callback);
        Map<ServerLocation, Object> results = SingleHopClientExecutor.submitGetAll(serverToFilterMap, callableTasks, cms, (LocalRegion) region);
        for (ServerLocation server : results.keySet()) {
            Object serverResult = results.get(server);
            if (serverResult instanceof ServerConnectivityException) {
                se = (ServerConnectivityException) serverResult;
                retryList.addAll(serverToFilterMap.get(server));
            } else {
                if (result == null) {
                    result = (VersionedObjectList) serverResult;
                } else {
                    result.addAll((VersionedObjectList) serverResult);
                }
            }
        }
        if (se != null) {
            if (retryAttempts == 0) {
                throw se;
            } else {
                VersionedObjectList retryResult = GetAllOp.execute(pool, region.getFullPath(), retryList, callback);
                if (result == null) {
                    result = retryResult;
                } else {
                    result.addAll(retryResult);
                }
            }
        }
        return result;
    }
}
Also used : ServerConnectivityException(org.apache.geode.cache.client.ServerConnectivityException) ServerLocation(org.apache.geode.distributed.internal.ServerLocation) ArrayList(java.util.ArrayList) List(java.util.List) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) HashSet(java.util.HashSet)
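
As with the previous example, here is a small hypothetical sketch, not part of the Geode source, of how GetAllOp.execute might be called. It assumes the org.apache.geode.cache.client.internal package for GetAllOp and ExecutablePool, and it only uses the VersionedObjectList accessors (size, getKeys) that appear in the snippets on this page.

// Hypothetical caller of the execute(...) method shown in Example 2.
import java.util.List;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.internal.ExecutablePool; // package assumed
import org.apache.geode.cache.client.internal.GetAllOp; // package assumed
import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList;

public class GetAllOpUsageSketch {

    static void getAllAndReport(ExecutablePool pool, Region region, List keys, Object callback) {
        // retryAttempts = 1 allows one retry pass over the keys whose server threw a
        // ServerConnectivityException during the single-hop fan-out
        VersionedObjectList values = GetAllOp.execute(pool, region, keys, 1, callback);
        if (values != null) {
            System.out.println("getAll returned " + values.size() + " entries for keys "
                    + values.getKeys());
        }
    }
}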

Example 3 with VersionedObjectList

Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.

From the class DistTXStateOnCoordinator, the method applyIndividualOp:

/**
   * {@inheritDoc}
   * 
   */
@Override
protected boolean applyIndividualOp(DistTxEntryEvent dtop) throws DataLocationException {
    boolean result = true;
    if (dtop.op.isUpdate() || dtop.op.isCreate()) {
        if (dtop.op.isPutAll()) {
            assert (dtop.getPutAllOperation() != null);
            // [DISTTX] TODO what do with versions next?
            final VersionedObjectList versions = new VersionedObjectList(dtop.getPutAllOperation().putAllDataSize, true, dtop.region.concurrencyChecksEnabled);
            postPutAll(dtop.getPutAllOperation(), versions, dtop.region);
        } else {
            result = putEntry(dtop, false /* ifNew */, false /* ifOld */,
                null /* expectedOldValue */, false /* requireOldValue */,
                0L /* lastModified */, true);
        }
    } else if (dtop.op.isDestroy()) {
        if (dtop.op.isRemoveAll()) {
            assert (dtop.getRemoveAllOperation() != null);
            // [DISTTX] TODO what do with versions next?
            final VersionedObjectList versions = new VersionedObjectList(dtop.getRemoveAllOperation().removeAllDataSize, true, dtop.region.concurrencyChecksEnabled);
            postRemoveAll(dtop.getRemoveAllOperation(), versions, dtop.region);
        } else {
            destroyExistingEntry(dtop, false, /* TODO [DISTTX] */ null);
        }
    } else if (dtop.op.isInvalidate()) {
        invalidateExistingEntry(dtop, true, /* TODO [DISTTX] */ false);
    } else {
        logger.debug("DistTXCommitPhaseOneMessage: unsupported TX operation {}", dtop);
        assert (false);
    }
    return result;
}
Also used : VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList)
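
For reference, a tiny sketch, not from the Geode source, of the allocation pattern used above: the VersionedObjectList is pre-sized to the bulk operation's data size and only tracks version tags when the region has concurrency checks enabled. The meaning of the constructor arguments is inferred from the call sites above, not from the class's own documentation, and the helper class is hypothetical.

// Minimal illustration of the VersionedObjectList allocation seen in applyIndividualOp.
import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList;

public class VersionedObjectListAllocationSketch {

    static VersionedObjectList forBulkOp(int bulkOpDataSize, boolean concurrencyChecksEnabled) {
        // mirrors: new VersionedObjectList(putAllDataSize, true, region.concurrencyChecksEnabled)
        return new VersionedObjectList(bulkOpDataSize, true, concurrencyChecksEnabled);
    }
}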

Example 4 with VersionedObjectList

Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.

From the class PartitionedRegion, the method postRemoveAllSend:

@Override
public long postRemoveAllSend(DistributedRemoveAllOperation op, VersionedObjectList successfulOps) {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    if (cache.isCacheAtShutdownAll()) {
        throw new CacheClosedException("Cache is shutting down");
    }
    final long startTime = PartitionedRegionStats.startTime();
    // build all the msgs by bucketid
    HashMap<Integer, RemoveAllPRMessage> prMsgMap = op.createPRMessages();
    PutAllPartialResult partialKeys = new PutAllPartialResult(op.removeAllDataSize);
    // clear the successfulOps list since we're actually doing the removes here
    // and the basicRemoveAll work was just a way to build the "op" object
    Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>(successfulOps.size());
    successfulOps.clearVersions();
    Iterator<Map.Entry<Integer, RemoveAllPRMessage>> itor = prMsgMap.entrySet().iterator();
    while (itor.hasNext()) {
        Map.Entry<Integer, RemoveAllPRMessage> mapEntry = itor.next();
        Integer bucketId = (Integer) mapEntry.getKey();
        RemoveAllPRMessage prMsg = mapEntry.getValue();
        checkReadiness();
        long then = 0;
        if (isDebugEnabled) {
            then = System.currentTimeMillis();
        }
        try {
            VersionedObjectList versions = sendMsgByBucket(bucketId, prMsg);
            if (versions.size() > 0) {
                partialKeys.addKeysAndVersions(versions);
                versions.saveVersions(keyToVersionMap);
            } else if (!this.concurrencyChecksEnabled) {
                // no keys returned if not versioned
                Set keys = prMsg.getKeys();
                partialKeys.addKeys(keys);
            }
        } catch (PutAllPartialResultException pre) {
            // sendMsgByBucket applied partial keys
            if (isDebugEnabled) {
                logger.debug("PR.postRemoveAll encountered BulkOpPartialResultException, ", pre);
            }
            partialKeys.consolidate(pre.getResult());
        } catch (Exception ex) {
            // If failed at other exception
            if (isDebugEnabled) {
                logger.debug("PR.postRemoveAll encountered exception at sendMsgByBucket, ", ex);
            }
            @Released EntryEventImpl firstEvent = prMsg.getFirstEvent(this);
            try {
                partialKeys.saveFailedKey(firstEvent.getKey(), ex);
            } finally {
                firstEvent.release();
            }
        }
        if (isDebugEnabled) {
            long now = System.currentTimeMillis();
            if ((now - then) >= 10000) {
                logger.debug("PR.sendMsgByBucket took {} ms", (now - then));
            }
        }
    }
    this.prStats.endRemoveAll(startTime);
    if (!keyToVersionMap.isEmpty()) {
        for (Iterator it = successfulOps.getKeys().iterator(); it.hasNext(); ) {
            successfulOps.addVersion(keyToVersionMap.get(it.next()));
        }
        keyToVersionMap.clear();
    }
    if (partialKeys.hasFailure()) {
        logger.info(LocalizedMessage.create(LocalizedStrings.Region_PutAll_Applied_PartialKeys_0_1, new Object[] { getFullPath(), partialKeys }));
        if (op.isBridgeOperation()) {
            if (partialKeys.getFailure() instanceof CancelException) {
                throw (CancelException) partialKeys.getFailure();
            } else {
                throw new PutAllPartialResultException(partialKeys);
            }
        } else {
            if (partialKeys.getFailure() instanceof RuntimeException) {
                throw (RuntimeException) partialKeys.getFailure();
            } else {
                throw new RuntimeException(partialKeys.getFailure());
            }
        }
    }
    return -1;
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) ResultsSet(org.apache.geode.cache.query.internal.ResultsSet) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashMap(java.util.HashMap) Map(java.util.Map) Iterator(java.util.Iterator) PREntriesIterator(org.apache.geode.internal.cache.partitioned.PREntriesIterator) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) RemoveAllPRMessage(org.apache.geode.internal.cache.partitioned.RemoveAllPRMessage) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) CacheClosedException(org.apache.geode.cache.CacheClosedException) TimeoutException(org.apache.geode.cache.TimeoutException) IndexCreationException(org.apache.geode.cache.query.IndexCreationException) NameResolutionException(org.apache.geode.cache.query.NameResolutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) EntryNotFoundException(org.apache.geode.cache.EntryNotFoundException) InternalGemFireException(org.apache.geode.InternalGemFireException) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) TransactionDataRebalancedException(org.apache.geode.cache.TransactionDataRebalancedException) LockServiceDestroyedException(org.apache.geode.distributed.LockServiceDestroyedException) GatewaySenderException(org.apache.geode.internal.cache.wan.GatewaySenderException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) IOException(java.io.IOException) CacheException(org.apache.geode.cache.CacheException) GatewaySenderConfigurationException(org.apache.geode.internal.cache.wan.GatewaySenderConfigurationException) ExecutionException(java.util.concurrent.ExecutionException) ReplyException(org.apache.geode.distributed.internal.ReplyException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException) FunctionDomainException(org.apache.geode.cache.query.FunctionDomainException) EntryExistsException(org.apache.geode.cache.EntryExistsException) PartitionedRegionDistributionException(org.apache.geode.cache.PartitionedRegionDistributionException) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) FunctionException(org.apache.geode.cache.execute.FunctionException) CacheLoaderException(org.apache.geode.cache.CacheLoaderException) NoSuchElementException(java.util.NoSuchElementException) QueryException(org.apache.geode.cache.query.QueryException) PartitionNotAvailableException(org.apache.geode.cache.partition.PartitionNotAvailableException) LowMemoryException(org.apache.geode.cache.LowMemoryException) InternalFunctionInvocationTargetException(org.apache.geode.internal.cache.execute.InternalFunctionInvocationTargetException) IndexInvalidException(org.apache.geode.cache.query.IndexInvalidException) PRLocallyDestroyedException(org.apache.geode.internal.cache.partitioned.PRLocallyDestroyedException) RegionExistsException(org.apache.geode.cache.RegionExistsException) CancelException(org.apache.geode.CancelException) DiskAccessException(org.apache.geode.cache.DiskAccessException) CacheWriterException(org.apache.geode.cache.CacheWriterException) TransactionException(org.apache.geode.cache.TransactionException) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) MultiIndexCreationException(org.apache.geode.cache.query.MultiIndexCreationException) TransactionDataNotColocatedException(org.apache.geode.cache.TransactionDataNotColocatedException) EmptyRegionFunctionException(org.apache.geode.cache.execute.EmptyRegionFunctionException)
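
The version bookkeeping in the middle of postRemoveAllSend is easy to miss, so here is an isolated sketch of just that step, not taken from the Geode source: per-bucket responses record a version tag per key, and those tags are then re-attached to successfulOps in the caller's original key order. The helper class is hypothetical; only the VersionedObjectList methods visible in the snippet (clearVersions, saveVersions, getKeys, addVersion) are used.

// Standalone sketch of the bucket-version merge performed above.
import java.util.HashMap;
import java.util.Map;

import org.apache.geode.internal.cache.tier.sockets.VersionedObjectList;
import org.apache.geode.internal.cache.versions.VersionTag;

public class RemoveAllVersionMergeSketch {

    static void mergeBucketVersions(VersionedObjectList successfulOps,
            Iterable<VersionedObjectList> perBucketResults) {
        Map<Object, VersionTag> keyToVersionMap = new HashMap<Object, VersionTag>();
        // drop any versions recorded before the per-bucket sends; the keys are kept
        successfulOps.clearVersions();
        for (VersionedObjectList bucketVersions : perBucketResults) {
            // collect the version tag returned for each key, in whatever bucket order
            bucketVersions.saveVersions(keyToVersionMap);
        }
        // re-attach the tags in the order the caller originally supplied the keys
        for (Object key : successfulOps.getKeys()) {
            successfulOps.addVersion(keyToVersionMap.get(key));
        }
    }
}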

Example 5 with VersionedObjectList

Use of org.apache.geode.internal.cache.tier.sockets.VersionedObjectList in project geode by apache.

From the class PutAll80, the method cmdExecute:

@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp) throws IOException, InterruptedException {
    // copy this since we need to modify it
    long start = startp;
    Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
    String regionName = null;
    int numberOfKeys = 0;
    Object key = null;
    Part eventPart = null;
    boolean replyWithMetaData = false;
    VersionedObjectList response = null;
    StringBuffer errMessage = new StringBuffer();
    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
    CacheServerStats stats = serverConnection.getCacheServerStats();
    // requiresResponse = true;
    serverConnection.setAsTrue(REQUIRES_RESPONSE);
    // new in 8.0
    serverConnection.setAsTrue(REQUIRES_CHUNKED_RESPONSE);
    {
        long oldStart = start;
        start = DistributionStats.getStatTime();
        stats.incReadPutAllRequestTime(start - oldStart);
    }
    try {
        // Retrieve the data from the message parts
        // part 0: region name
        regionNamePart = clientMessage.getPart(0);
        regionName = regionNamePart.getString();
        if (regionName == null) {
            String putAllMsg = LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
            logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
            errMessage.append(putAllMsg);
            writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            return;
        }
        LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
        if (region == null) {
            String reason = " was not found during putAll request";
            writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            return;
        }
        final int BASE_PART_COUNT = getBasePartCount();
        // part 1: eventID
        eventPart = clientMessage.getPart(1);
        ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
        long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
        Breadcrumbs.setEventId(eventId);
        // part 2: invoke callbacks (used by import)
        Part callbacksPart = clientMessage.getPart(2);
        boolean skipCallbacks = callbacksPart.getInt() == 1 ? true : false;
        // part 3: flags
        int flags = clientMessage.getPart(3).getInt();
        boolean clientIsEmpty = (flags & PutAllOp.FLAG_EMPTY) != 0;
        boolean clientHasCCEnabled = (flags & PutAllOp.FLAG_CONCURRENCY_CHECKS) != 0;
        // part 4: number of keys
        numberOfKeysPart = clientMessage.getPart(4);
        numberOfKeys = numberOfKeysPart.getInt();
        Object callbackArg = getOptionalCallbackArg(clientMessage);
        if (logger.isDebugEnabled()) {
            StringBuilder buffer = new StringBuilder();
            buffer.append(serverConnection.getName()).append(": Received ").append(this.putAllClassName()).append(" request from ").append(serverConnection.getSocketString()).append(" for region ").append(regionName).append(callbackArg != null ? (" callbackArg " + callbackArg) : "").append(" with ").append(numberOfKeys).append(" entries.");
            logger.debug(buffer.toString());
        }
        // building the map
        Map map = new LinkedHashMap();
        Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
        // Map isObjectMap = new LinkedHashMap();
        for (int i = 0; i < numberOfKeys; i++) {
            keyPart = clientMessage.getPart(BASE_PART_COUNT + i * 2);
            key = keyPart.getStringOrObject();
            if (key == null) {
                String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
                logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
                errMessage.append(putAllMsg);
                writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
            valuePart = clientMessage.getPart(BASE_PART_COUNT + i * 2 + 1);
            if (valuePart.isNull()) {
                String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
                logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
                errMessage.append(putAllMsg);
                writeChunkedErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
            // byte[] value = valuePart.getSerializedForm();
            Object value;
            if (valuePart.isObject()) {
                // if skipCallbacks is ever made configurable, this code will need to be updated
                if (skipCallbacks && Token.INVALID.isSerializedValue(valuePart.getSerializedForm())) {
                    value = Token.INVALID;
                } else {
                    value = CachedDeserializableFactory.create(valuePart.getSerializedForm());
                }
            } else {
                value = valuePart.getSerializedForm();
            }
            // put serializedform for auth. It will be modified with auth callback
            if (clientMessage.isRetry()) {
                // Construct the thread id/sequence id information for this element in the
                // putAll map
                // The sequence id is constructed from the base sequence id and the offset
                EventID entryEventId = new EventID(eventId, i);
                // For PRs, the thread id is assigned as a fake thread id.
                if (region instanceof PartitionedRegion) {
                    PartitionedRegion pr = (PartitionedRegion) region;
                    int bucketId = pr.getKeyInfo(key).getBucketId();
                    long entryThreadId = ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
                    entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId, entryEventId.getSequenceID());
                }
                VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);
                if (tag != null) {
                    retryVersions.put(key, tag);
                }
            // FIND THE VERSION TAG FOR THIS KEY - but how? all we have is the
            // putAll eventId, not individual eventIds for entries, right?
            }
            map.put(key, value);
        // isObjectMap.put(key, new Boolean(isObject));
        }
        if (clientMessage.getNumberOfParts() == (BASE_PART_COUNT + 2 * numberOfKeys + 1)) {
            // it means the optional timeout has been added
            int timeout = clientMessage.getPart(BASE_PART_COUNT + 2 * numberOfKeys).getInt();
            serverConnection.setRequestSpecificTimeout(timeout);
        }
        this.securityService.authorizeRegionWrite(regionName);
        AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
        if (authzRequest != null) {
            if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
                authzRequest.createRegionAuthorize(regionName);
            } else {
                PutAllOperationContext putAllContext = authzRequest.putAllAuthorize(regionName, map, callbackArg);
                map = putAllContext.getMap();
                if (map instanceof UpdateOnlyMap) {
                    map = ((UpdateOnlyMap) map).getInternalMap();
                }
                callbackArg = putAllContext.getCallbackArg();
            }
        } else {
        // no auth, so update the map based on isObjectMap here
        /*
         * Collection entries = map.entrySet(); Iterator iterator = entries.iterator(); Map.Entry
         * mapEntry = null; while (iterator.hasNext()) { mapEntry = (Map.Entry)iterator.next();
         * Object currkey = mapEntry.getKey(); byte[] serializedValue = (byte[])mapEntry.getValue();
         * boolean isObject = ((Boolean)isObjectMap.get(currkey)).booleanValue(); if (isObject) {
         * map.put(currkey, CachedDeserializableFactory.create(serializedValue)); } }
         */
        }
        response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId, skipCallbacks, callbackArg);
        if (!region.getConcurrencyChecksEnabled() || clientIsEmpty || !clientHasCCEnabled) {
            // has storage
            if (logger.isTraceEnabled()) {
                logger.trace("setting response to null. region-cc-enabled={}; clientIsEmpty={}; client-cc-enabled={}", region.getConcurrencyChecksEnabled(), clientIsEmpty, clientHasCCEnabled);
            }
            response = null;
        }
        if (region instanceof PartitionedRegion) {
            PartitionedRegion pr = (PartitionedRegion) region;
            if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
                writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
                pr.clearNetworkHopData();
                replyWithMetaData = true;
            }
        }
    } catch (RegionDestroyedException rde) {
        writeChunkedException(clientMessage, rde, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (ResourceException re) {
        writeChunkedException(clientMessage, re, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (PutAllPartialResultException pre) {
        writeChunkedException(clientMessage, pre, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (Exception ce) {
        // If an interrupted exception is thrown , rethrow it
        checkForInterrupt(serverConnection, ce);
        // If an exception occurs during the put, preserve the connection
        writeChunkedException(clientMessage, ce, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION, serverConnection.getName()), ce);
        return;
    } finally {
        long oldStart = start;
        start = DistributionStats.getStatTime();
        stats.incProcessPutAllTime(start - oldStart);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("{}: Sending {} response back to {} for regin {} {}", serverConnection.getName(), putAllClassName(), serverConnection.getSocketString(), regionName, (logger.isTraceEnabled() ? ": " + response : ""));
    }
    // Increment statistics and write the reply
    if (!replyWithMetaData) {
        writeReply(clientMessage, response, serverConnection);
    }
    serverConnection.setAsTrue(RESPONDED);
    stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
}
Also used : UpdateOnlyMap(org.apache.geode.cache.operations.internal.UpdateOnlyMap) AuthorizeRequest(org.apache.geode.internal.security.AuthorizeRequest) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) LocalRegion(org.apache.geode.internal.cache.LocalRegion) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) CachedRegionHelper(org.apache.geode.internal.cache.tier.CachedRegionHelper) PutAllOperationContext(org.apache.geode.cache.operations.PutAllOperationContext) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) ResourceException(org.apache.geode.cache.ResourceException) ByteBuffer(java.nio.ByteBuffer) IOException(java.io.IOException) CacheServerStats(org.apache.geode.internal.cache.tier.sockets.CacheServerStats) Part(org.apache.geode.internal.cache.tier.sockets.Part) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) EventID(org.apache.geode.internal.cache.EventID)
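
Finally, a compact sketch, not from the Geode source, of the wire layout this command decodes: a fixed set of header parts (region name, event ID, callback flag, flags, key count) followed by alternating key/value parts, with an optional trailing timeout part. Part indices mirror the code above; the helper class and method are hypothetical, and the basePartCount argument stands in for whatever getBasePartCount() returns.

// Hypothetical decoder showing only the key/value part layout handled by cmdExecute.
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.geode.internal.cache.tier.sockets.Message;
import org.apache.geode.internal.cache.tier.sockets.Part;

public class PutAll80LayoutSketch {

    static Map decodeKeyValueParts(Message clientMessage, int basePartCount, int numberOfKeys)
            throws Exception {
        Map map = new LinkedHashMap();
        for (int i = 0; i < numberOfKeys; i++) {
            Part keyPart = clientMessage.getPart(basePartCount + i * 2);       // key part
            Part valuePart = clientMessage.getPart(basePartCount + i * 2 + 1); // value part
            Object key = keyPart.getStringOrObject();
            // keep the raw serialized form here; the real command wraps object values in a
            // CachedDeserializable before handing the map to basicBridgePutAll
            map.put(key, valuePart.getSerializedForm());
        }
        // an extra trailing part, if present, carries the request-specific timeout
        return map;
    }
}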

Aggregations

VersionedObjectList (org.apache.geode.internal.cache.tier.sockets.VersionedObjectList)37 HashMap (java.util.HashMap)13 Map (java.util.Map)12 RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException)12 VersionTag (org.apache.geode.internal.cache.versions.VersionTag)12 CacheClosedException (org.apache.geode.cache.CacheClosedException)10 PutAllPartialResult (org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult)10 ArrayList (java.util.ArrayList)9 Iterator (java.util.Iterator)9 List (java.util.List)9 CancelException (org.apache.geode.CancelException)9 CacheException (org.apache.geode.cache.CacheException)9 EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException)9 IOException (java.io.IOException)8 PutAllPartialResultException (org.apache.geode.internal.cache.PutAllPartialResultException)8 ConcurrentCacheModificationException (org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException)8 Released (org.apache.geode.internal.offheap.annotations.Released)8 ExecutionException (java.util.concurrent.ExecutionException)7 InternalGemFireException (org.apache.geode.InternalGemFireException)7 CacheWriterException (org.apache.geode.cache.CacheWriterException)7