Search in sources :

Example 96 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

The class PutAll70, method cmdExecute.

@Override
public void cmdExecute(Message clientMessage, ServerConnection serverConnection, long startp) throws IOException, InterruptedException {
    // copy this since we need to modify it
    long start = startp;
    Part regionNamePart = null, numberOfKeysPart = null, keyPart = null, valuePart = null;
    String regionName = null;
    int numberOfKeys = 0;
    Object key = null;
    Part eventPart = null;
    boolean replyWithMetaData = false;
    VersionedObjectList response = null;
    StringBuffer errMessage = new StringBuffer();
    CachedRegionHelper crHelper = serverConnection.getCachedRegionHelper();
    CacheServerStats stats = serverConnection.getCacheServerStats();
    // requiresResponse = true;
    serverConnection.setAsTrue(REQUIRES_RESPONSE);
    {
        long oldStart = start;
        start = DistributionStats.getStatTime();
        stats.incReadPutAllRequestTime(start - oldStart);
    }
    try {
        // Retrieve the data from the message parts
        // part 0: region name
        regionNamePart = clientMessage.getPart(0);
        regionName = regionNamePart.getString();
        if (regionName == null) {
            String putAllMsg = LocalizedStrings.PutAll_THE_INPUT_REGION_NAME_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
            logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
            errMessage.append(putAllMsg);
            writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            return;
        }
        LocalRegion region = (LocalRegion) crHelper.getRegion(regionName);
        if (region == null) {
            String reason = " was not found during putAll request";
            writeRegionDestroyedEx(clientMessage, regionName, reason, serverConnection);
            serverConnection.setAsTrue(RESPONDED);
            return;
        }
        // part 1: eventID
        eventPart = clientMessage.getPart(1);
        ByteBuffer eventIdPartsBuffer = ByteBuffer.wrap(eventPart.getSerializedForm());
        long threadId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        long sequenceId = EventID.readEventIdPartsFromOptmizedByteArray(eventIdPartsBuffer);
        EventID eventId = new EventID(serverConnection.getEventMemberIDByteArray(), threadId, sequenceId);
        // part 2: invoke callbacks (used by import)
        Part callbacksPart = clientMessage.getPart(2);
        boolean skipCallbacks = callbacksPart.getInt() == 1;
        // part 3: number of keys
        numberOfKeysPart = clientMessage.getPart(3);
        numberOfKeys = numberOfKeysPart.getInt();
        // building the map
        Map map = new LinkedHashMap();
        Map<Object, VersionTag> retryVersions = new LinkedHashMap<Object, VersionTag>();
        // Map isObjectMap = new LinkedHashMap();
        for (int i = 0; i < numberOfKeys; i++) {
            keyPart = clientMessage.getPart(4 + i * 2);
            key = keyPart.getStringOrObject();
            if (key == null) {
                String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_KEYS_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
                logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
                errMessage.append(putAllMsg);
                writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
            valuePart = clientMessage.getPart(4 + i * 2 + 1);
            if (valuePart.isNull()) {
                String putAllMsg = LocalizedStrings.PutAll_ONE_OF_THE_INPUT_VALUES_FOR_THE_PUTALL_REQUEST_IS_NULL.toLocalizedString();
                logger.warn("{}: {}", serverConnection.getName(), putAllMsg);
                errMessage.append(putAllMsg);
                writeErrorResponse(clientMessage, MessageType.PUT_DATA_ERROR, errMessage.toString(), serverConnection);
                serverConnection.setAsTrue(RESPONDED);
                return;
            }
            // byte[] value = valuePart.getSerializedForm();
            Object value;
            if (valuePart.isObject()) {
                // This code uses skipCallbacks as an import flag; if skipCallbacks is ever made
                // configurable, this code will need to be updated.
                if (skipCallbacks && Token.INVALID.isSerializedValue(valuePart.getSerializedForm())) {
                    value = Token.INVALID;
                } else {
                    value = CachedDeserializableFactory.create(valuePart.getSerializedForm());
                }
            } else {
                value = valuePart.getSerializedForm();
            }
            // Put the serialized form in the map for authorization; it may be modified by the auth callback.
            if (clientMessage.isRetry()) {
                // Construct the thread id/sequence id information for this element in the
                // putAll map. The sequence id is constructed from the base sequence id and
                // the offset.
                EventID entryEventId = new EventID(eventId, i);
                // For PRs, a fake thread id is assigned (derived from the bucket id).
                if (region instanceof PartitionedRegion) {
                    PartitionedRegion pr = (PartitionedRegion) region;
                    int bucketId = pr.getKeyInfo(key).getBucketId();
                    long entryThreadId = ThreadIdentifier.createFakeThreadIDForBulkOp(bucketId, entryEventId.getThreadID());
                    entryEventId = new EventID(entryEventId.getMembershipID(), entryThreadId, entryEventId.getSequenceID());
                }
                VersionTag tag = findVersionTagsForRetriedBulkOp(region, entryEventId);
                if (tag != null) {
                    retryVersions.put(key, tag);
                }
            // FIND THE VERSION TAG FOR THIS KEY - but how? all we have is the
            // putAll eventId, not individual eventIds for entries, right?
            }
            map.put(key, value);
        // isObjectMap.put(key, new Boolean(isObject));
        }
        if (clientMessage.getNumberOfParts() == (4 + 2 * numberOfKeys + 1)) {
            // it means the optional timeout has been added
            int timeout = clientMessage.getPart(4 + 2 * numberOfKeys).getInt();
            serverConnection.setRequestSpecificTimeout(timeout);
        }
        this.securityService.authorizeRegionWrite(regionName);
        AuthorizeRequest authzRequest = serverConnection.getAuthzRequest();
        if (authzRequest != null) {
            if (DynamicRegionFactory.regionIsDynamicRegionList(regionName)) {
                authzRequest.createRegionAuthorize(regionName);
            } else {
                PutAllOperationContext putAllContext = authzRequest.putAllAuthorize(regionName, map, null);
                map = putAllContext.getMap();
                if (map instanceof UpdateOnlyMap) {
                    map = ((UpdateOnlyMap) map).getInternalMap();
                }
            }
        } else {
        // no auth, so update the map based on isObjectMap here
        /*
         * Collection entries = map.entrySet(); Iterator iterator = entries.iterator(); Map.Entry
         * mapEntry = null; while (iterator.hasNext()) { mapEntry = (Map.Entry)iterator.next();
         * Object currkey = mapEntry.getKey(); byte[] serializedValue = (byte[])mapEntry.getValue();
         * boolean isObject = ((Boolean)isObjectMap.get(currkey)).booleanValue(); if (isObject) {
         * map.put(currkey, CachedDeserializableFactory.create(serializedValue)); } }
         */
        }
        if (logger.isDebugEnabled()) {
            logger.debug("{}: Received putAll request ({} bytes) from {} for region {}", serverConnection.getName(), clientMessage.getPayloadLength(), serverConnection.getSocketString(), regionName);
        }
        response = region.basicBridgePutAll(map, retryVersions, serverConnection.getProxyID(), eventId, skipCallbacks, null);
        if (!region.getConcurrencyChecksEnabled()) {
            // the client only needs this if versioning is being used
            response = null;
        }
        if (region instanceof PartitionedRegion) {
            PartitionedRegion pr = (PartitionedRegion) region;
            if (pr.getNetworkHopType() != PartitionedRegion.NETWORK_HOP_NONE) {
                writeReplyWithRefreshMetadata(clientMessage, response, serverConnection, pr, pr.getNetworkHopType());
                pr.clearNetworkHopData();
                replyWithMetaData = true;
            }
        }
    } catch (RegionDestroyedException rde) {
        writeException(clientMessage, rde, false, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (ResourceException re) {
        writeException(clientMessage, re, false, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (PutAllPartialResultException pre) {
        writeException(clientMessage, pre, false, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        return;
    } catch (Exception ce) {
        // If an interrupted exception is thrown, rethrow it
        checkForInterrupt(serverConnection, ce);
        // If an exception occurs during the put, preserve the connection
        writeException(clientMessage, ce, false, serverConnection);
        serverConnection.setAsTrue(RESPONDED);
        // if (logger.fineEnabled()) {
        logger.warn(LocalizedMessage.create(LocalizedStrings.Generic_0_UNEXPECTED_EXCEPTION, serverConnection.getName()), ce);
        // }
        return;
    } finally {
        long oldStart = start;
        start = DistributionStats.getStatTime();
        stats.incProcessPutAllTime(start - oldStart);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("{}: Sending putAll70 response back to {} for region {}: {}", serverConnection.getName(), serverConnection.getSocketString(), regionName, response);
    }
    // Starting in 7.0.1 we do not send the keys back
    if (response != null && Version.GFE_70.compareTo(serverConnection.getClientVersion()) < 0) {
        if (logger.isDebugEnabled()) {
            logger.debug("setting putAll keys to null");
        }
        response.setKeys(null);
    }
    // Increment statistics and write the reply
    if (!replyWithMetaData) {
        writeReply(clientMessage, response, serverConnection);
    }
    serverConnection.setAsTrue(RESPONDED);
    stats.incWritePutAllResponseTime(DistributionStats.getStatTime() - start);
}
Also used : UpdateOnlyMap(org.apache.geode.cache.operations.internal.UpdateOnlyMap) AuthorizeRequest(org.apache.geode.internal.security.AuthorizeRequest) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) LocalRegion(org.apache.geode.internal.cache.LocalRegion) LinkedHashMap(java.util.LinkedHashMap) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) CachedRegionHelper(org.apache.geode.internal.cache.tier.CachedRegionHelper) PutAllOperationContext(org.apache.geode.cache.operations.PutAllOperationContext) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) ResourceException(org.apache.geode.cache.ResourceException) ByteBuffer(java.nio.ByteBuffer) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) IOException(java.io.IOException) ResourceException(org.apache.geode.cache.ResourceException) CacheServerStats(org.apache.geode.internal.cache.tier.sockets.CacheServerStats) Part(org.apache.geode.internal.cache.tier.sockets.Part) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) EventID(org.apache.geode.internal.cache.EventID) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) UpdateOnlyMap(org.apache.geode.cache.operations.internal.UpdateOnlyMap)
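
On the client side, the error path in this handler surfaces as a RegionDestroyedException thrown from Region.putAll. The following is a minimal, hypothetical client sketch (the locator address and region name are illustrative, not taken from the example above) showing where that exception is caught:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionDestroyedException;
import org.apache.geode.cache.client.ClientCache;
import org.apache.geode.cache.client.ClientCacheFactory;
import org.apache.geode.cache.client.ClientRegionShortcut;

public class PutAllClientSketch {
    public static void main(String[] args) {
        ClientCache cache = new ClientCacheFactory()
                .addPoolLocator("localhost", 10334) // hypothetical locator address
                .create();
        Region<String, String> region = cache
                .<String, String>createClientRegionFactory(ClientRegionShortcut.PROXY)
                .create("exampleRegion"); // hypothetical region name
        Map<String, String> batch = new LinkedHashMap<>();
        batch.put("k1", "v1");
        batch.put("k2", "v2");
        try {
            // If the server-side region is destroyed before the handler resolves it,
            // writeRegionDestroyedEx() above produces the error the client sees here.
            region.putAll(batch);
        } catch (RegionDestroyedException rde) {
            System.err.println("putAll aborted, region gone: " + rde.getRegionFullPath());
        } finally {
            cache.close();
        }
    }
}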

Example 97 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

The class Oplog, method readCrf.

/**
   * Return number of bytes read
   */
private long readCrf(OplogEntryIdSet deletedIds, boolean recoverValues, boolean latestOplog) {
    this.recoverNewEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryIdHWM = DiskStoreImpl.INVALID_ID;
    boolean readLastRecord = true;
    CountingDataInputStream dis = null;
    try {
        final LocalRegion currentRegion = LocalRegion.getInitializingRegion();
        final Version version = getProductVersionIfOld();
        final ByteArrayDataInput in = new ByteArrayDataInput();
        final HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
        int recordCount = 0;
        boolean foundDiskStoreRecord = false;
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(this.crf.f);
            dis = new CountingDataInputStream(new BufferedInputStream(fis, 1024 * 1024), this.crf.f.length());
            boolean endOfLog = false;
            while (!endOfLog) {
                // long startPosition = byteCount;
                if (dis.atEndOfFile()) {
                    endOfLog = true;
                    break;
                }
                readLastRecord = false;
                byte opCode = dis.readByte();
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "Oplog opCode={}", opCode);
                }
                switch(opCode) {
                    case OPLOG_EOF_ID:
                        // we are at the end of the oplog. So we need to back up one byte
                        dis.decrementCount();
                        endOfLog = true;
                        break;
                    case OPLOG_CONFLICT_VERSION:
                        this.readVersionTagOnlyEntry(dis, opCode);
                        break;
                    case OPLOG_NEW_ENTRY_BASE_ID:
                        {
                            long newEntryBase = dis.readLong();
                            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                                logger.trace(LogMarker.PERSIST_RECOVERY, "newEntryBase={}", newEntryBase);
                            }
                            readEndOfRecord(dis);
                            setRecoverNewEntryId(newEntryBase);
                            recordCount++;
                        }
                        break;
                    case OPLOG_NEW_ENTRY_0ID:
                        readNewEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_1ID:
                    case OPLOG_MOD_ENTRY_2ID:
                    case OPLOG_MOD_ENTRY_3ID:
                    case OPLOG_MOD_ENTRY_4ID:
                    case OPLOG_MOD_ENTRY_5ID:
                    case OPLOG_MOD_ENTRY_6ID:
                    case OPLOG_MOD_ENTRY_7ID:
                    case OPLOG_MOD_ENTRY_8ID:
                        readModifyEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_WITH_KEY_1ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_2ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_3ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_4ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_5ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_6ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_7ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_8ID:
                        readModifyEntryWithKey(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_DISK_STORE_ID:
                        readDiskStoreRecord(dis, this.crf.f);
                        foundDiskStoreRecord = true;
                        recordCount++;
                        break;
                    case OPLOG_MAGIC_SEQ_ID:
                        readOplogMagicSeqRecord(dis, this.crf.f, OPLOG_TYPE.CRF);
                        break;
                    case OPLOG_GEMFIRE_VERSION:
                        readGemfireVersionRecord(dis, this.crf.f);
                        recordCount++;
                        break;
                    case OPLOG_RVV:
                        readRVVRecord(dis, this.drf.f, false, latestOplog);
                        recordCount++;
                        break;
                    default:
                        throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
                }
                readLastRecord = true;
            // @todo
            // if (rgn.isDestroyed()) {
            // break;
            // }
            }
        // while
        } finally {
            if (dis != null) {
                dis.close();
            }
            if (fis != null) {
                fis.close();
            }
        }
        if (!foundDiskStoreRecord && recordCount > 0) {
            throw new DiskAccessException("The oplog file \"" + this.crf.f + "\" does not belong to the init file \"" + getParent().getInitFile() + "\". Crf did not contain a disk store id.", getParent());
        }
    } catch (EOFException ignore) {
    // ignore since a partial record write can be caused by a crash
    } catch (IOException ex) {
        getParent().getCancelCriterion().checkCancelInProgress(ex);
        throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.crf.f.getPath()), ex, getParent());
    } catch (CancelException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", e);
        }
    } catch (RegionDestroyedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", e);
        }
    } catch (IllegalStateException e) {
        throw e;
    }
    // Add the Oplog size to the Directory Holder which owns this oplog,
    // so that available space is correctly calculated & stats updated.
    long byteCount = 0;
    if (!readLastRecord) {
        // this means that there was a crash
        // and hence we should not continue to read
        // the next oplog
        this.crashed = true;
        if (dis != null) {
            byteCount = dis.getFileLength();
        }
    } else {
        if (dis != null) {
            byteCount = dis.getCount();
        }
    }
    return byteCount;
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteArrayDataInput(org.apache.geode.internal.ByteArrayDataInput) FileInputStream(java.io.FileInputStream) Version(org.apache.geode.internal.Version) BufferedInputStream(java.io.BufferedInputStream) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) DiskAccessException(org.apache.geode.cache.DiskAccessException) EOFException(java.io.EOFException) CancelException(org.apache.geode.CancelException)
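
The recovery loop in readCrf is an instance of a common pattern: read one opcode byte, dispatch on it, and treat a truncated trailing record as a clean end of file. Below is a minimal, self-contained sketch of that pattern, with hypothetical opcodes and record layouts standing in for Geode's OPLOG_* constants:

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;

public class RecordLogReader {

    // Hypothetical opcodes standing in for Geode's OPLOG_* constants.
    static final byte OP_EOF = 0;
    static final byte OP_PUT = 1;
    static final byte OP_DELETE = 2;

    /** Scan the log and return the number of complete records read. */
    public static long readAll(String path) throws IOException {
        long records = 0;
        try (DataInputStream dis = new DataInputStream(
                new BufferedInputStream(new FileInputStream(path), 1024 * 1024))) {
            while (true) {
                byte opCode = dis.readByte();
                switch (opCode) {
                    case OP_EOF:
                        // explicit end-of-log marker
                        return records;
                    case OP_PUT: {
                        dis.readLong(); // entry id
                        byte[] value = new byte[dis.readInt()];
                        dis.readFully(value); // value payload
                        records++;
                        break;
                    }
                    case OP_DELETE:
                        dis.readLong(); // id of the deleted entry
                        records++;
                        break;
                    default:
                        throw new IOException("unknown opcode " + opCode + " in " + path);
                }
            }
        } catch (EOFException ignore) {
            // A partial trailing record (e.g. after a crash) simply ends the scan,
            // just as readCrf treats EOFException as a clean stop.
        }
        return records;
    }
}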

Example 98 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

The class PRHARedundancyProvider, method scheduleRedundancyRecovery.

/**
   * Schedule a task to perform redundancy recovery for a new node or for a departed node.
   */
public void scheduleRedundancyRecovery(Object failedMemId) {
    final boolean isStartup = failedMemId == null;
    final InternalCache cache = this.prRegion.getCache();
    final int redundantCopies = PRHARedundancyProvider.this.prRegion.getRedundantCopies();
    final long delay;
    final boolean movePrimaries;
    if (isStartup) {
        delay = this.prRegion.getPartitionAttributes().getStartupRecoveryDelay();
        movePrimaries = !Boolean.getBoolean(DistributionConfig.GEMFIRE_PREFIX + "DISABLE_MOVE_PRIMARIES_ON_STARTUP");
    } else {
        delay = this.prRegion.getPartitionAttributes().getRecoveryDelay();
        movePrimaries = false;
    }
    final boolean requiresRedundancyRecovery = delay >= 0;
    if (!requiresRedundancyRecovery) {
        return;
    }
    if (!PRHARedundancyProvider.this.prRegion.isDataStore()) {
        return;
    }
    Runnable task = new RecoveryRunnable(this) {

        @Override
        public void run2() {
            try {
                final boolean isFixedPartitionedRegion = PRHARedundancyProvider.this.prRegion.isFixedPartitionedRegion();
                // Fix for 43582 - always replace offline data for fixed partitioned
                // regions - this guarantees we create the buckets we are supposed to
                // create on this node.
                boolean replaceOfflineData = isFixedPartitionedRegion || !isStartup;
                RebalanceDirector director;
                if (isFixedPartitionedRegion) {
                    director = new FPRDirector(true, movePrimaries);
                } else {
                    director = new CompositeDirector(true, true, false, movePrimaries);
                }
                final PartitionedRegionRebalanceOp rebalance = new PartitionedRegionRebalanceOp(PRHARedundancyProvider.this.prRegion, false, director, replaceOfflineData, false);
                long start = PRHARedundancyProvider.this.prRegion.getPrStats().startRecovery();
                if (isFixedPartitionedRegion) {
                    rebalance.executeFPA();
                } else {
                    rebalance.execute();
                }
                PRHARedundancyProvider.this.prRegion.getPrStats().endRecovery(start);
                PRHARedundancyProvider.this.recoveryFuture = null;
            } catch (CancelException e) {
                logger.debug("Cache closed while recovery in progress");
            } catch (RegionDestroyedException e) {
                logger.debug("Region destroyed while recovery in progress");
            } catch (Exception e) {
                logger.error(LocalizedMessage.create(LocalizedStrings.PRHARedundancyProvider_UNEXPECTED_EXCEPTION_DURING_BUCKET_RECOVERY), e);
            }
        }
    };
    synchronized (this.shutdownLock) {
        // possible fix for bug 41094
        if (!this.shutdown) {
            try {
                if (logger.isDebugEnabled()) {
                    if (isStartup) {
                        logger.debug(this.prRegion + " scheduling redundancy recovery in {} ms", delay);
                    } else {
                        logger.debug("prRegion scheduling redundancy recovery after departure/crash/error in {} in {} ms", failedMemId, delay);
                    }
                }
                recoveryFuture = this.recoveryExecutor.schedule(task, delay, TimeUnit.MILLISECONDS);
            } catch (RejectedExecutionException e) {
            // ok, the executor is shutting down.
            }
        }
    }
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CompositeDirector(org.apache.geode.internal.cache.partitioned.rebalance.CompositeDirector) PartitionedRegionStorageException(org.apache.geode.cache.PartitionedRegionStorageException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) CacheClosedException(org.apache.geode.cache.CacheClosedException) CancelException(org.apache.geode.CancelException) PartitionOfflineException(org.apache.geode.cache.persistence.PartitionOfflineException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) RebalanceDirector(org.apache.geode.internal.cache.partitioned.rebalance.RebalanceDirector) FPRDirector(org.apache.geode.internal.cache.partitioned.rebalance.FPRDirector) CancelException(org.apache.geode.CancelException)
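
Stripped of the rebalancing logic, the scheduling skeleton above is: gate on a negative delay, take a shutdown lock, and swallow RejectedExecutionException when the executor is already going down. Here is a minimal sketch of that skeleton, assuming a plain ScheduledExecutorService rather than Geode's recovery executor:

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class DelayedRecoveryScheduler {

    private final ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
    private final Object shutdownLock = new Object();
    private boolean shutdown;
    private volatile ScheduledFuture<?> recoveryFuture;

    /** Schedule recovery after delayMillis; a negative delay disables recovery. */
    public void scheduleRecovery(long delayMillis, Runnable task) {
        if (delayMillis < 0) {
            return; // mirrors the "delay >= 0" gate in scheduleRedundancyRecovery
        }
        synchronized (shutdownLock) {
            if (shutdown) {
                return; // don't race a new schedule against shutdown
            }
            try {
                recoveryFuture = executor.schedule(task, delayMillis, TimeUnit.MILLISECONDS);
            } catch (RejectedExecutionException ignored) {
                // ok, the executor is shutting down; nothing to recover
            }
        }
    }

    public void shutdown() {
        synchronized (shutdownLock) {
            shutdown = true;
            executor.shutdownNow();
        }
    }
}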

Example 99 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

The class Oplog, method recoverValuesIfNeeded.

/**
   * This method is called by the async value recovery task to recover the values from the crf if
   * the keys were recovered from the krf.
   */
public void recoverValuesIfNeeded(Map<Long, DiskRecoveryStore> diskRecoveryStores) {
    // Early out if we start closing the parent.
    if (getParent().isClosing()) {
        return;
    }
    List<KRFEntry> sortedLiveEntries;
    HashMap<Long, DiskRegionInfo> targetRegions = new HashMap<Long, DiskRegionInfo>(this.regionMap);
    synchronized (diskRecoveryStores) {
        Iterator<DiskRecoveryStore> itr = diskRecoveryStores.values().iterator();
        while (itr.hasNext()) {
            DiskRecoveryStore store = itr.next();
            if (isLruValueRecoveryDisabled(store) || store.lruLimitExceeded()) {
                itr.remove();
            }
        }
        // Get a sorted list of live entries from the target regions
        targetRegions.keySet().retainAll(diskRecoveryStores.keySet());
    }
    sortedLiveEntries = getSortedLiveEntries(targetRegions.values());
    if (sortedLiveEntries == null) {
        // There are no live entries in this oplog to recover.
        return;
    }
    final ByteArrayDataInput in = new ByteArrayDataInput();
    for (KRFEntry entry : sortedLiveEntries) {
        // Early out if we start closing the parent.
        if (getParent().isClosing()) {
            return;
        }
        DiskEntry diskEntry = entry.getDiskEntry();
        DiskRegionView diskRegionView = entry.getDiskRegionView();
        long diskRegionId = diskRegionView.getId();
        synchronized (diskRecoveryStores) {
            DiskRecoveryStore diskRecoveryStore = diskRecoveryStores.get(diskRegionId);
            if (diskRecoveryStore == null) {
                continue;
            }
            // Reset the disk region view because it may have changed
            // due to the region being created.
            diskRegionView = diskRecoveryStore.getDiskRegionView();
            if (diskRegionView == null) {
                continue;
            }
            if (diskRecoveryStore.lruLimitExceeded()) {
                diskRecoveryStores.remove(diskRegionId);
                continue;
            }
            if (diskRegionView.isEntriesMapIncompatible()) {
                // Refetch the disk entry because it may have changed due to copying
                // an incompatible region map
                diskEntry = (DiskEntry) diskRecoveryStore.getRegionMap().getEntryInVM(diskEntry.getKey());
                if (diskEntry == null) {
                    continue;
                }
            }
            synchronized (diskEntry) {
                // Make sure the entry hasn't been modified
                if (diskEntry.getDiskId() != null && diskEntry.getDiskId().getOplogId() == oplogId) {
                    try {
                        DiskEntry.Helper.recoverValue(diskEntry, getOplogId(), diskRecoveryStore, in);
                    } catch (RegionDestroyedException ignore) {
                        // This region has been destroyed, stop recovering from it.
                        diskRecoveryStores.remove(diskRegionId);
                    }
                }
            }
        }
    }
}
Also used : Long2ObjectOpenHashMap(it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap) Int2ObjectOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) ByteArrayDataInput(org.apache.geode.internal.ByteArrayDataInput) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) DiskRecoveryStore(org.apache.geode.internal.cache.persistence.DiskRecoveryStore) AtomicLong(java.util.concurrent.atomic.AtomicLong)
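
The method above repeatedly prunes diskRecoveryStores while iterating, always under synchronization on the map and, in the first loop, via Iterator.remove(). A minimal generic sketch of that prune-while-iterating pattern, assuming a caller-supplied predicate decides which stores are no longer recoverable:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.function.Predicate;

public class RecoveryStorePruner {

    /**
     * Remove entries whose stores can no longer be recovered. Removal goes through
     * Iterator.remove() so the map is never structurally modified behind the
     * iterator's back, and all access is synchronized on the map itself, matching
     * how recoverValuesIfNeeded() guards diskRecoveryStores.
     */
    public static <K, V> void prune(Map<K, V> stores, Predicate<V> unrecoverable) {
        synchronized (stores) {
            Iterator<Map.Entry<K, V>> itr = stores.entrySet().iterator();
            while (itr.hasNext()) {
                if (unrecoverable.test(itr.next().getValue())) {
                    itr.remove(); // safe removal during iteration
                }
            }
        }
    }

    public static void main(String[] args) {
        Map<Long, String> stores = new HashMap<>();
        stores.put(1L, "live");
        stores.put(2L, "destroyed");
        prune(stores, "destroyed"::equals);
        System.out.println(stores); // prints {1=live}
    }
}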

Example 100 with RegionDestroyedException

Use of org.apache.geode.cache.RegionDestroyedException in project geode by apache.

The class PutAllPRMessage, method doLocalPutAll.

/**
   * This method is called either by operateOnPartitionedRegion() when processing a remote msg,
   * or by sendMsgByBucket() when processing a msg targeted to the local JVM (PartitionedRegion).
   * Note: it is very important that this message does NOT cause any deadlocks, as the sender
   * will wait indefinitely for the acknowledgment.
   * 
   * @param r the partitioned region
   * @param eventSender the endpoint server who received the request from the client
   * @param lastModified timestamp for last modification
   * @return true if the operation succeeds; otherwise an exception is thrown
   */
@edu.umd.cs.findbugs.annotations.SuppressWarnings("IMSE_DONT_CATCH_IMSE")
public boolean doLocalPutAll(PartitionedRegion r, InternalDistributedMember eventSender, long lastModified) throws EntryExistsException, ForceReattemptException, DataLocationException {
    boolean didPut = false;
    long clientReadTimeOut = PoolFactory.DEFAULT_READ_TIMEOUT;
    if (r.hasServerProxy()) {
        clientReadTimeOut = r.getServerProxy().getPool().getReadTimeout();
        if (logger.isDebugEnabled()) {
            logger.debug("PutAllPRMessage: doLocalPutAll: clientReadTimeOut is {}", clientReadTimeOut);
        }
    }
    DistributedPutAllOperation dpao = null;
    @Released EntryEventImpl baseEvent = null;
    BucketRegion bucketRegion = null;
    PartitionedRegionDataStore ds = r.getDataStore();
    InternalDistributedMember myId = r.getDistributionManager().getDistributionManagerId();
    try {
        if (!notificationOnly) {
            // bucketRegion is not null only when !notificationOnly
            bucketRegion = ds.getInitializedBucketForId(null, bucketId);
            this.versions = new VersionedObjectList(this.putAllPRDataSize, true, bucketRegion.getAttributes().getConcurrencyChecksEnabled());
            // create a base event and a DPAO for PutAllMessage distributed between redundant buckets
            baseEvent = EntryEventImpl.create(bucketRegion, Operation.PUTALL_CREATE, null, null, this.callbackArg, true, eventSender, !skipCallbacks, true);
            // set baseEventId to the first entry's event id. We need the thread id for DACE
            baseEvent.setEventId(putAllPRData[0].getEventID());
            if (this.bridgeContext != null) {
                baseEvent.setContext(this.bridgeContext);
            }
            baseEvent.setPossibleDuplicate(this.posDup);
            if (logger.isDebugEnabled()) {
                logger.debug("PutAllPRMessage.doLocalPutAll: eventSender is {}, baseEvent is {}, msg is {}", eventSender, baseEvent, this);
            }
            dpao = new DistributedPutAllOperation(baseEvent, putAllPRDataSize, false);
        }
        // Fix the updateMsg misorder issue
        // Lock the keys when doing postPutAll
        Object[] keys = new Object[putAllPRDataSize];
        for (int i = 0; i < putAllPRDataSize; ++i) {
            keys[i] = putAllPRData[i].getKey();
        }
        if (!notificationOnly) {
            try {
                if (putAllPRData.length > 0) {
                    if (this.posDup && bucketRegion.getConcurrencyChecksEnabled()) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("attempting to locate version tags for retried event");
                        }
                        // recover the version tags generated by the previous attempt
                        for (int i = 0; i < putAllPRDataSize; i++) {
                            if (putAllPRData[i].versionTag == null) {
                                putAllPRData[i].versionTag = bucketRegion.findVersionTagForClientBulkOp(putAllPRData[i].getEventID());
                                if (putAllPRData[i].versionTag != null) {
                                    putAllPRData[i].versionTag.replaceNullIDs(bucketRegion.getVersionMember());
                                }
                            }
                        }
                    }
                    EventID eventID = putAllPRData[0].getEventID();
                    ThreadIdentifier membershipID = new ThreadIdentifier(eventID.getMembershipID(), eventID.getThreadID());
                    bucketRegion.recordBulkOpStart(membershipID, eventID);
                }
                bucketRegion.waitUntilLocked(keys);
                boolean lockedForPrimary = false;
                final HashMap succeeded = new HashMap();
                PutAllPartialResult partialKeys = new PutAllPartialResult(putAllPRDataSize);
                Object key = keys[0];
                try {
                    bucketRegion.doLockForPrimary(false);
                    lockedForPrimary = true;
                    /*
                     * The real work to be synchronized; it can take a long time. We don't need to
                     * worry about another thread sending a msg with the same keys as this request,
                     * because such requests will be blocked by foundKey.
                     */
                    for (int i = 0; i < putAllPRDataSize; i++) {
                        @Released EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                        try {
                            key = ev.getKey();
                            ev.setPutAllOperation(dpao);
                            // make sure a local update inserts a cache de-serializable
                            ev.makeSerializedNewValue();
                            // then in basicPutPart3(), the ev is added into dpao
                            try {
                                didPut = r.getDataView().putEntryOnRemote(ev, false, false, null, false, lastModified, true);
                                if (didPut && logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally success for {}", ev);
                                }
                            } catch (ConcurrentCacheModificationException e) {
                                didPut = true;
                                if (logger.isDebugEnabled()) {
                                    logger.debug("PutAllPRMessage.doLocalPutAll:putLocally encountered concurrent cache modification for {}", ev, e);
                                }
                            }
                            putAllPRData[i].setTailKey(ev.getTailKey());
                            if (!didPut) {
                                // make sure the region hasn't gone away
                                r.checkReadiness();
                                ForceReattemptException fre = new ForceReattemptException("unable to perform put in PutAllPR, but operation should not fail");
                                fre.setHash(ev.getKey().hashCode());
                                throw fre;
                            } else {
                                succeeded.put(putAllPRData[i].getKey(), putAllPRData[i].getValue());
                                this.versions.addKeyAndVersion(putAllPRData[i].getKey(), ev.getVersionTag());
                            }
                        } finally {
                            ev.release();
                        }
                    }
                // for
                } catch (IllegalMonitorStateException ignore) {
                    throw new ForceReattemptException("unable to get lock for primary, retrying... ");
                } catch (CacheWriterException cwe) {
                    // encounter cacheWriter exception
                    partialKeys.saveFailedKey(key, cwe);
                } finally {
                    try {
                        // Only PutAllPRMessage knows if the thread id is fake. Event has no idea.
                        // So we have to manually set useFakeEventId for this DPAO
                        dpao.setUseFakeEventId(true);
                        r.checkReadiness();
                        bucketRegion.getDataView().postPutAll(dpao, this.versions, bucketRegion);
                    } finally {
                        if (lockedForPrimary) {
                            bucketRegion.doUnlockForPrimary();
                        }
                    }
                }
                if (partialKeys.hasFailure()) {
                    partialKeys.addKeysAndVersions(this.versions);
                    if (logger.isDebugEnabled()) {
                        logger.debug("PutAllPRMessage: partial keys applied, map to bucket {}'s keys: {}. Applied {}", bucketId, Arrays.toString(keys), succeeded);
                    }
                    throw new PutAllPartialResultException(partialKeys);
                }
            } catch (RegionDestroyedException e) {
                ds.checkRegionDestroyedOnBucket(bucketRegion, true, e);
            } finally {
                bucketRegion.removeAndNotifyKeys(keys);
            }
        } else {
            for (int i = 0; i < putAllPRDataSize; i++) {
                EntryEventImpl ev = getEventFromEntry(r, myId, eventSender, i, putAllPRData, notificationOnly, bridgeContext, posDup, skipCallbacks);
                try {
                    ev.setOriginRemote(true);
                    if (this.callbackArg != null) {
                        ev.setCallbackArgument(this.callbackArg);
                    }
                    r.invokePutCallbacks(ev.getOperation().isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, ev, r.isInitialized(), true);
                } finally {
                    ev.release();
                }
            }
        }
    } finally {
        if (baseEvent != null)
            baseEvent.release();
        if (dpao != null)
            dpao.freeOffHeapResources();
    }
    return true;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) EntryEventImpl(org.apache.geode.internal.cache.EntryEventImpl) HashMap(java.util.HashMap) ThreadIdentifier(org.apache.geode.internal.cache.ha.ThreadIdentifier) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) VersionedObjectList(org.apache.geode.internal.cache.tier.sockets.VersionedObjectList) DistributedPutAllOperation(org.apache.geode.internal.cache.DistributedPutAllOperation) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) PutAllPartialResult(org.apache.geode.internal.cache.PutAllPartialResultException.PutAllPartialResult) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) PutAllPartialResultException(org.apache.geode.internal.cache.PutAllPartialResultException) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) BucketRegion(org.apache.geode.internal.cache.BucketRegion) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) EventID(org.apache.geode.internal.cache.EventID) CacheWriterException(org.apache.geode.cache.CacheWriterException)
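
The locking discipline is the part of doLocalPutAll that is easiest to get wrong: the per-key locks and the primary lock must be released in finally blocks even when a PutAllPartialResultException or RegionDestroyedException escapes. A minimal sketch of that nested lock/finally shape, with ReentrantLocks standing in for waitUntilLocked()/doLockForPrimary():

import java.util.concurrent.locks.ReentrantLock;

public class NestedLockSketch {

    private final ReentrantLock keyLock = new ReentrantLock();
    private final ReentrantLock primaryLock = new ReentrantLock();

    public void doBulkOp(Runnable work) {
        keyLock.lock(); // stands in for bucketRegion.waitUntilLocked(keys)
        try {
            boolean lockedForPrimary = false;
            try {
                primaryLock.lock(); // stands in for doLockForPrimary(false)
                lockedForPrimary = true;
                work.run(); // the per-entry puts and postPutAll
            } finally {
                // Only release the primary lock if it was actually acquired,
                // mirroring the lockedForPrimary flag in doLocalPutAll.
                if (lockedForPrimary) {
                    primaryLock.unlock();
                }
            }
        } finally {
            keyLock.unlock(); // stands in for removeAndNotifyKeys(keys)
        }
    }
}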

Aggregations

RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 124 usages
CancelException (org.apache.geode.CancelException): 41 usages
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 37 usages
Region (org.apache.geode.cache.Region): 35 usages
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 28 usages
IOException (java.io.IOException): 25 usages
Cache (org.apache.geode.cache.Cache): 20 usages
CacheException (org.apache.geode.cache.CacheException): 19 usages
QueryException (org.apache.geode.cache.query.QueryException): 16 usages
QueryInvocationTargetException (org.apache.geode.cache.query.QueryInvocationTargetException): 16 usages
ReplyException (org.apache.geode.distributed.internal.ReplyException): 16 usages
CacheClosedException (org.apache.geode.cache.CacheClosedException): 14 usages
EntryNotFoundException (org.apache.geode.cache.EntryNotFoundException): 14 usages
SelectResults (org.apache.geode.cache.query.SelectResults): 13 usages
EventID (org.apache.geode.internal.cache.EventID): 13 usages
Test (org.junit.Test): 13 usages
Iterator (java.util.Iterator): 12 usages
TransactionDataNotColocatedException (org.apache.geode.cache.TransactionDataNotColocatedException): 12 usages
CacheSerializableRunnable (org.apache.geode.cache30.CacheSerializableRunnable): 12 usages
QueryService (org.apache.geode.cache.query.QueryService): 11 usages