
Example 71 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class AbstractRegionMap method txApplyDestroy.

public void txApplyDestroy(Object key, TransactionId txId, TXRmtEvent txEvent, boolean inTokenMode, boolean inRI, Operation op, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, boolean isOriginRemote, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
    final boolean isDebugEnabled = logger.isDebugEnabled();
    final LocalRegion owner = _getOwner();
    final boolean isRegionReady = !inTokenMode;
    final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
    boolean cbEventInPending = false;
    IndexManager oqlIndexManager = owner.getIndexManager();
    try {
        RegionEntry re = getEntry(key);
        if (re != null) {
            // Wait for any in-progress index initialization so this destroy does not run in
            // parallel with index INIT.
            if (oqlIndexManager != null) {
                oqlIndexManager.waitForIndexInit();
            }
            try {
                synchronized (re) {
                    if (!re.isRemoved() || re.isTombstone()) {
                        Object oldValue = re.getValueInVM(owner);
                        final int oldSize = owner.calculateRegionEntryValueSize(re);
                        // Create an entry event only if the calling context is
                        // a receipt of a TXCommitMessage AND there are callbacks installed
                        // for this region
                        boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                        @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                        try {
                            if (owner.isUsedForPartitionedRegionBucket()) {
                                txHandleWANEvent(owner, cbEvent, txEntryState);
                            }
                            cbEvent.setRegionEntry(re);
                            cbEvent.setOldValue(oldValue);
                            if (isDebugEnabled) {
                                logger.debug("txApplyDestroy cbEvent={}", cbEvent);
                            }
                            txRemoveOldIndexEntry(Operation.DESTROY, re);
                            if (txEvent != null) {
                                txEvent.addDestroy(owner, re, re.getKey(), aCallbackArgument);
                            }
                            boolean clearOccured = false;
                            try {
                                processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
                                if (inTokenMode) {
                                    if (oldValue == Token.TOMBSTONE) {
                                        owner.unscheduleTombstone(re);
                                    }
                                    re.setValue(owner, Token.DESTROYED);
                                } else {
                                    if (!re.isTombstone()) {
                                        if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                            re.makeTombstone(owner, cbEvent.getVersionTag());
                                        } else {
                                            // fix for bug 43063
                                            re.removePhase1(owner, false);
                                            re.removePhase2();
                                            removeEntry(key, re, false);
                                        }
                                    } else {
                                        owner.rescheduleTombstone(re, re.getVersionStamp().asVersionTag());
                                    }
                                }
                                EntryLogger.logTXDestroy(_getOwnerObject(), key);
                                owner.updateSizeOnRemove(key, oldSize);
                            } catch (RegionClearedException rce) {
                                clearOccured = true;
                            }
                            owner.txApplyDestroyPart2(re, re.getKey(), inTokenMode, clearOccured);
                            if (invokeCallbacks) {
                                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                if (pendingCallbacks == null) {
                                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, true);
                                } else {
                                    pendingCallbacks.add(cbEvent);
                                    cbEventInPending = true;
                                }
                            }
                            if (!clearOccured) {
                                lruEntryDestroy(re);
                            }
                            if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                                txEntryState.setVersionTag(cbEvent.getVersionTag());
                            }
                        } finally {
                            if (!cbEventInPending)
                                cbEvent.release();
                        }
                    }
                }
            } finally {
                if (oqlIndexManager != null) {
                    oqlIndexManager.countDownIndexUpdaters();
                }
            }
        } else if (inTokenMode || owner.concurrencyChecksEnabled) {
            // treating tokenMode and re == null as same, since we now want to
            // generate versions and Tombstones for destroys
            boolean dispatchListenerEvent = inTokenMode;
            boolean opCompleted = false;
            // TODO: if inTokenMode then Token.DESTROYED is ok but what about !inTokenMode because
            // owner.concurrencyChecksEnabled? In that case we do not want a DESTROYED token.
            RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.DESTROYED);
            if (oqlIndexManager != null) {
                oqlIndexManager.waitForIndexInit();
            }
            EntryEventImpl cbEvent = null;
            try {
                synchronized (newRe) {
                    RegionEntry oldRe = putEntryIfAbsent(key, newRe);
                    while (!opCompleted && oldRe != null) {
                        synchronized (oldRe) {
                            if (oldRe.isRemovedPhase2()) {
                                owner.getCachePerfStats().incRetries();
                                _getMap().remove(key, oldRe);
                                oldRe = putEntryIfAbsent(key, newRe);
                            } else {
                                try {
                                    boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                                    cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                                    try {
                                        cbEvent.setRegionEntry(oldRe);
                                        cbEvent.setOldValue(Token.NOT_AVAILABLE);
                                        if (isDebugEnabled) {
                                            logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
                                        }
                                        if (owner.isUsedForPartitionedRegionBucket()) {
                                            txHandleWANEvent(owner, cbEvent, txEntryState);
                                        }
                                        processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
                                        if (invokeCallbacks) {
                                            switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                            if (pendingCallbacks == null) {
                                                owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                                            } else {
                                                pendingCallbacks.add(cbEvent);
                                                cbEventInPending = true;
                                            }
                                        }
                                        int oldSize = 0;
                                        boolean wasTombstone = oldRe.isTombstone();
                                        if (!wasTombstone) {
                                            oldSize = owner.calculateRegionEntryValueSize(oldRe);
                                        }
                                        oldRe.setValue(owner, Token.DESTROYED);
                                        EntryLogger.logTXDestroy(_getOwnerObject(), key);
                                        if (wasTombstone) {
                                            owner.unscheduleTombstone(oldRe);
                                        }
                                        owner.updateSizeOnRemove(oldRe.getKey(), oldSize);
                                        owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, false);
                                        lruEntryDestroy(oldRe);
                                    } finally {
                                        if (!cbEventInPending)
                                            cbEvent.release();
                                    }
                                } catch (RegionClearedException rce) {
                                    owner.txApplyDestroyPart2(oldRe, oldRe.getKey(), inTokenMode, true);
                                }
                                if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                    oldRe.makeTombstone(owner, cbEvent.getVersionTag());
                                } else if (!inTokenMode) {
                                    // only remove for NORMAL regions if they do not generate versions see 51781
                                    // fix for bug 43063
                                    oldRe.removePhase1(owner, false);
                                    oldRe.removePhase2();
                                    removeEntry(key, oldRe, false);
                                }
                                opCompleted = true;
                            }
                        }
                    }
                    if (!opCompleted) {
                        // already has value set to Token.DESTROYED
                        opCompleted = true;
                        boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady || inRI);
                        cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
                        try {
                            cbEvent.setRegionEntry(newRe);
                            cbEvent.setOldValue(Token.NOT_AVAILABLE);
                            if (isDebugEnabled) {
                                logger.debug("txApplyDestroy token mode cbEvent={}", cbEvent);
                            }
                            if (owner.isUsedForPartitionedRegionBucket()) {
                                txHandleWANEvent(owner, cbEvent, txEntryState);
                            }
                            processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
                            if (invokeCallbacks) {
                                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                if (pendingCallbacks == null) {
                                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, dispatchListenerEvent);
                                } else {
                                    pendingCallbacks.add(cbEvent);
                                    cbEventInPending = true;
                                }
                            }
                            EntryLogger.logTXDestroy(_getOwnerObject(), key);
                            owner.updateSizeOnCreate(newRe.getKey(), 0);
                            if (shouldPerformConcurrencyChecks(owner, cbEvent) && cbEvent.getVersionTag() != null) {
                                newRe.makeTombstone(owner, cbEvent.getVersionTag());
                            } else if (!inTokenMode) {
                                // only remove for NORMAL regions if they do not generate versions see 51781
                                // fix for bug 43063
                                newRe.removePhase1(owner, false);
                                newRe.removePhase2();
                                removeEntry(key, newRe, false);
                            }
                            owner.txApplyDestroyPart2(newRe, newRe.getKey(), inTokenMode, false);
                        // Note no need for LRU work since the entry is destroyed
                        // and will be removed when gii completes
                        } finally {
                            if (!cbEventInPending)
                                cbEvent.release();
                        }
                    }
                    if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                        txEntryState.setVersionTag(cbEvent.getVersionTag());
                    }
                }
            } catch (RegionClearedException e) {
            // TODO
            } finally {
                if (oqlIndexManager != null) {
                    oqlIndexManager.countDownIndexUpdaters();
                }
            }
        } else if (re == null) {
            // Fix bug#43594
            // In cases where bucket region is re-created, it may so happen that
            // the destroy is already applied on the Initial image provider, thus
            // causing region entry to be absent.
            // Notify clients with client events.
            @Released EntryEventImpl cbEvent = createCBEvent(owner, op, key, null, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
            try {
                if (owner.isUsedForPartitionedRegionBucket()) {
                    txHandleWANEvent(owner, cbEvent, txEntryState);
                }
                switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                if (pendingCallbacks == null) {
                    owner.invokeTXCallbacks(EnumListenerEvent.AFTER_DESTROY, cbEvent, false);
                } else {
                    pendingCallbacks.add(cbEvent);
                    cbEventInPending = true;
                }
            } finally {
                if (!cbEventInPending)
                    cbEvent.release();
            }
        }
    } catch (DiskAccessException dae) {
        owner.handleDiskAccessException(dae);
        throw dae;
    }
}
Also used : IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) Released(org.apache.geode.internal.offheap.annotations.Released) DiskAccessException(org.apache.geode.cache.DiskAccessException) StoredObject(org.apache.geode.internal.offheap.StoredObject)
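
The outer catch block above shows the pattern that recurs throughout these examples: a disk-backed operation catches DiskAccessException, lets the owning region react (typically by closing the region or disk store), and then rethrows so the caller still sees the failure. Below is a minimal sketch of that catch-notify-rethrow shape; DiskBackedMap and DiskFailureHandler are invented names for illustration, not Geode internals.

import org.apache.geode.cache.DiskAccessException;

// Hypothetical stand-ins for illustration; these are not Geode's internal APIs.
interface DiskFailureHandler {
    void handleDiskAccessException(DiskAccessException dae);
}

class DiskBackedMap {
    private final DiskFailureHandler owner;

    DiskBackedMap(DiskFailureHandler owner) {
        this.owner = owner;
    }

    void destroyEntry(Runnable diskMutation) {
        try {
            // Mutate the entry; this may fault values in from disk or write tokens to it.
            diskMutation.run();
        } catch (DiskAccessException dae) {
            // Give the owner a chance to close the region or disk store, then propagate.
            owner.handleDiskAccessException(dae);
            throw dae;
        }
    }
}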

Example 72 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class PartitionedRegion method createAndValidatePersistentConfig.

private void createAndValidatePersistentConfig() {
    DiskStoreImpl dsi = this.getDiskStore();
    if (this.dataPolicy.withPersistence() && !this.concurrencyChecksEnabled && supportsConcurrencyChecks()) {
        logger.info(LocalizedMessage.create(LocalizedStrings.PartitionedRegion_ENABLING_CONCURRENCY_CHECKS_FOR_PERSISTENT_PR, this.getFullPath()));
        this.concurrencyChecksEnabled = true;
    }
    if (dsi != null && this.getDataPolicy().withPersistence()) {
        String colocatedWith = colocatedWithRegion == null ? "" : colocatedWithRegion.getFullPath();
        PRPersistentConfig config = dsi.getPersistentPRConfig(this.getFullPath());
        if (config != null) {
            if (config.getTotalNumBuckets() != this.getTotalNumberOfBuckets()) {
                Object[] prms = new Object[] { this.getFullPath(), this.getTotalNumberOfBuckets(), config.getTotalNumBuckets() };
                throw new IllegalStateException(LocalizedStrings.PartitionedRegion_FOR_REGION_0_TotalBucketNum_1_SHOULD_NOT_BE_CHANGED_Previous_Configured_2.toString(prms));
            }
            // Changing the colocated-with region would require rewriting a persisted
            // record to disk, so we won't allow that right now either.
            if (!colocatedWith.equals(config.getColocatedWith())) {
                Object[] prms = new Object[] { this.getFullPath(), colocatedWith, config.getColocatedWith() };
                DiskAccessException dae = new DiskAccessException(LocalizedStrings.LocalRegion_A_DISKACCESSEXCEPTION_HAS_OCCURRED_WHILE_WRITING_TO_THE_DISK_FOR_REGION_0_THE_REGION_WILL_BE_CLOSED.toLocalizedString(this.getFullPath()), null, dsi);
                dsi.handleDiskAccessException(dae);
                throw new IllegalStateException(LocalizedStrings.PartitionedRegion_FOR_REGION_0_ColocatedWith_1_SHOULD_NOT_BE_CHANGED_Previous_Configured_2.toString(prms));
            }
        } else {
            config = new PRPersistentConfig(this.getTotalNumberOfBuckets(), colocatedWith);
            dsi.addPersistentPR(this.getFullPath(), config);
            // If colocated with a region that uses a different disk store, record the config there
            // as well.
            if (colocatedWithRegion != null && colocatedWithRegion.getDiskStore() != null && colocatedWithRegion.getDiskStore() != dsi) {
                colocatedWithRegion.getDiskStore().addPersistentPR(this.getFullPath(), config);
            }
        }
    }
}
Also used : PRPersistentConfig(org.apache.geode.internal.cache.persistence.PRPersistentConfig) DiskAccessException(org.apache.geode.cache.DiskAccessException)
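
Example 72 constructs the DiskAccessException itself when a persisted partitioned-region configuration no longer matches the runtime configuration, hands it to the disk store's handler, and then throws IllegalStateException to the caller. The sketch below condenses that validation flow; PersistedPrConfig and PrConfigValidator are hypothetical stand-ins for Geode's PRPersistentConfig and DiskStoreImpl, and the two-argument (message, disk store name) DiskAccessException constructor is the form used in Example 74.

import org.apache.geode.cache.DiskAccessException;

// Hypothetical types for illustration; Geode uses DiskStoreImpl and PRPersistentConfig here.
class PersistedPrConfig {
    final int totalNumBuckets;
    final String colocatedWith;

    PersistedPrConfig(int totalNumBuckets, String colocatedWith) {
        this.totalNumBuckets = totalNumBuckets;
        this.colocatedWith = colocatedWith;
    }
}

class PrConfigValidator {
    // Fails fast if the persisted configuration disagrees with the configuration the
    // region is being created with, mirroring the checks in Example 72.
    void validate(String regionPath, String diskStoreName, PersistedPrConfig onDisk,
                  int runtimeBuckets, String runtimeColocatedWith) {
        if (onDisk.totalNumBuckets != runtimeBuckets) {
            throw new IllegalStateException("total-num-buckets for " + regionPath
                + " changed from " + onDisk.totalNumBuckets + " to " + runtimeBuckets);
        }
        if (!runtimeColocatedWith.equals(onDisk.colocatedWith)) {
            // Example 72 also raises a DiskAccessException so the disk store can shut itself down
            // (via dsi.handleDiskAccessException(dae)) before the IllegalStateException is thrown.
            DiskAccessException dae = new DiskAccessException(
                "colocated-with for " + regionPath + " changed on disk", diskStoreName);
            throw new IllegalStateException("colocated-with for " + regionPath
                + " changed from " + onDisk.colocatedWith + " to " + runtimeColocatedWith, dae);
        }
    }
}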

Example 73 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class LocalRegion method createReadEntry.

TXEntryState createReadEntry(TXRegionState txRegionState, KeyInfo keyInfo, boolean createIfAbsent) {
    TXEntryState result = null;
    final RegionEntry regionEntry = this.basicGetTXEntry(keyInfo);
    if (regionEntry != null) {
        boolean needsLRUCleanup = false;
        try {
            synchronized (regionEntry) {
                if (!regionEntry.isRemoved()) {
                    if (regionEntry instanceof DiskEntry && regionEntry instanceof LRUEntry) {
                        LRUEntry le = (LRUEntry) regionEntry;
                        if (le.testEvicted()) {
                            // Handle the case where we fault in a disk entry
                            txLRUStart();
                            needsLRUCleanup = true;
                            // Fault in the value from disk
                            regionEntry.getValue(this);
                        }
                    }
                    Object value = regionEntry.getValueInVM(this);
                    /*
                     * The tx will need the raw value for identity comparison. Please see
                     * TXEntryState#checkForConflict(LocalRegion,Object)
                     */
                    Object id = regionEntry.getTransformedValue();
                    result = txRegionState.createReadEntry(this, keyInfo.getKey(), regionEntry, id, value);
                }
            }
        } catch (DiskAccessException dae) {
            handleDiskAccessException(dae);
            needsLRUCleanup = false;
            throw dae;
        } finally {
            if (needsLRUCleanup) {
                // do this after releasing sync
                txLRUEnd();
            }
        }
    }
    if (result == null && createIfAbsent) {
        result = txRegionState.createReadEntry(this, keyInfo.getKey(), null, null, null);
    }
    return result;
}
Also used : LRUEntry(org.apache.geode.internal.cache.lru.LRUEntry) DiskAccessException(org.apache.geode.cache.DiskAccessException) StoredObject(org.apache.geode.internal.offheap.StoredObject)
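
Example 73 also shows a small but easy-to-miss detail: needsLRUCleanup is set only after txLRUStart() succeeds, and it is cleared again in the DiskAccessException handler so the finally block skips txLRUEnd() once a disk failure has taken the region down. A generic sketch of that flag-guarded cleanup pattern follows; the class and method names are invented, only the shape matches the example above.

import java.util.concurrent.Callable;
import org.apache.geode.cache.DiskAccessException;

class FaultInHelper {
    // Hypothetical hooks standing in for txLRUStart()/txLRUEnd() in the example.
    private void suspendEvictionAccounting() { /* ... */ }
    private void resumeEvictionAccounting() { /* ... */ }
    private void handleDiskFailure(DiskAccessException dae) { /* close the region, etc. */ }

    Object readWithFaultIn(Callable<Object> faultInFromDisk) throws Exception {
        boolean needsCleanup = false;
        try {
            suspendEvictionAccounting();
            needsCleanup = true;              // set only once the bracket is open
            return faultInFromDisk.call();    // may throw DiskAccessException
        } catch (DiskAccessException dae) {
            handleDiskFailure(dae);
            needsCleanup = false;             // skip the cleanup; the region is going down
            throw dae;
        } finally {
            if (needsCleanup) {
                resumeEvictionAccounting();   // runs after the try block, as txLRUEnd() does above
            }
        }
    }
}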

Example 74 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class DiskInitFileParser method parse.

public DiskStoreID parse() throws IOException, ClassNotFoundException {
    Version gfversion = Version.GFE_662;
    DiskStoreID result = null;
    boolean endOfFile = false;
    while (!endOfFile) {
        if (dis.atEndOfFile()) {
            endOfFile = true;
            break;
        }
        byte opCode = dis.readByte();
        if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
            logger.trace(LogMarker.PERSIST_RECOVERY, "DiskInitFile opcode={}", opCode);
        }
        switch(opCode) {
            case DiskInitFile.IF_EOF_ID:
                endOfFile = true;
                gotEOF = true;
                break;
            case DiskInitFile.IFREC_INSTANTIATOR_ID:
                {
                    int id = dis.readInt();
                    String cn = readClassName(dis);
                    String icn = readClassName(dis);
                    readEndOfRecord(dis);
                    interpreter.cmnInstantiatorId(id, cn, icn);
                }
                break;
            case DiskInitFile.IFREC_DATA_SERIALIZER_ID:
                {
                    Class<?> dsc = readClass(dis);
                    readEndOfRecord(dis);
                    interpreter.cmnDataSerializerId(dsc);
                }
                break;
            case DiskInitFile.IFREC_ONLINE_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_ONLINE_MEMBER_ID drId={} omid={}", drId, pmid);
                    }
                    interpreter.cmnOnlineMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_OFFLINE_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_OFFLINE_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnOfflineMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_RM_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_RM_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnRmMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_MY_MEMBER_INITIALIZING_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_MY_MEMBER_INITIALIZING_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmnAddMyInitializingPMID(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_MY_MEMBER_INITIALIZED_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_MY_MEMBER_INITIALIZED_ID drId={}", drId);
                    }
                    interpreter.cmnMarkInitialized(drId);
                }
                break;
            case DiskInitFile.IFREC_CREATE_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    String regName = dis.readUTF();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CREATE_REGION_ID drId= name={}", drId, regName);
                    }
                    interpreter.cmnCreateRegion(drId, regName);
                }
                break;
            case DiskInitFile.IFREC_BEGIN_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_BEGIN_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnBeginDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_END_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_END_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnEndDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_BEGIN_PARTIAL_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnBeginPartialDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_END_PARTIAL_DESTROY_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_END_PARTIAL_DESTROY_REGION_ID drId={}", drId);
                    }
                    interpreter.cmnEndPartialDestroyRegion(drId);
                }
                break;
            case DiskInitFile.IFREC_CLEAR_REGION_ID:
                {
                    long drId = readDiskRegionID(dis);
                    long clearOplogEntryId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CLEAR_REGION_ID drId={} oplogEntryId={}", drId, clearOplogEntryId);
                    }
                    interpreter.cmnClearRegion(drId, clearOplogEntryId);
                }
                break;
            case DiskInitFile.IFREC_CLEAR_REGION_WITH_RVV_ID:
                {
                    long drId = readDiskRegionID(dis);
                    int size = dis.readInt();
                    ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>> memberToVersion = new ConcurrentHashMap<DiskStoreID, RegionVersionHolder<DiskStoreID>>(size);
                    for (int i = 0; i < size; i++) {
                        DiskStoreID id = new DiskStoreID();
                        InternalDataSerializer.invokeFromData(id, dis);
                        RegionVersionHolder holder = new RegionVersionHolder(dis);
                        memberToVersion.put(id, holder);
                    }
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CLEAR_REGION_WITH_RVV_ID drId={} memberToVersion={}", drId, memberToVersion);
                    }
                    interpreter.cmnClearRegion(drId, memberToVersion);
                }
                break;
            case DiskInitFile.IFREC_CRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnCrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_DRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_DRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnDrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_KRF_CREATE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_KRF_CREATE oplogId={}", oplogId);
                    }
                    interpreter.cmnKrfCreate(oplogId);
                }
                break;
            case DiskInitFile.IFREC_CRF_DELETE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_CRF_DELETE oplogId={}", oplogId);
                    }
                    interpreter.cmnCrfDelete(oplogId);
                }
                break;
            case DiskInitFile.IFREC_DRF_DELETE:
                {
                    long oplogId = dis.readLong();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_DRF_DELETE oplogId={}", oplogId);
                    }
                    interpreter.cmnDrfDelete(oplogId);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID:
                {
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, // fixes bug 43910
                    ProxyBucketRegion.NO_FIXED_PARTITION_NAME, -1, null, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_66:
                {
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID_66 drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, null, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_80:
                {
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    String compressorClassName = dis.readUTF();
                    if ("".equals(compressorClassName)) {
                        compressorClassName = null;
                    }
                    if (dis.readBoolean()) {
                        flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
                    }
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID_80 drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName, false);
                }
                break;
            case DiskInitFile.IFREC_REGION_CONFIG_ID_90:
                {
                    long drId = readDiskRegionID(dis);
                    byte lruAlgorithm = dis.readByte();
                    byte lruAction = dis.readByte();
                    int lruLimit = dis.readInt();
                    int concurrencyLevel = dis.readInt();
                    int initialCapacity = dis.readInt();
                    float loadFactor = dis.readFloat();
                    boolean statisticsEnabled = dis.readBoolean();
                    boolean isBucket = dis.readBoolean();
                    EnumSet<DiskRegionFlag> flags = EnumSet.noneOf(DiskRegionFlag.class);
                    String partitionName = dis.readUTF();
                    int startingBucketId = dis.readInt();
                    String compressorClassName = dis.readUTF();
                    if ("".equals(compressorClassName)) {
                        compressorClassName = null;
                    }
                    if (dis.readBoolean()) {
                        flags.add(DiskRegionFlag.IS_WITH_VERSIONING);
                    }
                    boolean offHeap = dis.readBoolean();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REGION_CONFIG_ID_90 drId={}", drId);
                    }
                    interpreter.cmnRegionConfig(drId, lruAlgorithm, lruAction, lruLimit, concurrencyLevel, initialCapacity, loadFactor, statisticsEnabled, isBucket, flags, partitionName, startingBucketId, compressorClassName, offHeap);
                }
                break;
            case DiskInitFile.IFREC_OFFLINE_AND_EQUAL_MEMBER_ID:
                {
                    long drId = readDiskRegionID(dis);
                    PersistentMemberID pmid = readPMID(dis, gfversion);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_OFFLINE_AND_EQUAL_MEMBER_ID drId={} pmid={}", drId, pmid);
                    }
                    interpreter.cmdOfflineAndEqualMemberId(drId, pmid);
                }
                break;
            case DiskInitFile.IFREC_DISKSTORE_ID:
                {
                    long leastSigBits = dis.readLong();
                    long mostSigBits = dis.readLong();
                    readEndOfRecord(dis);
                    result = new DiskStoreID(mostSigBits, leastSigBits);
                    interpreter.cmnDiskStoreID(result);
                }
                break;
            case DiskInitFile.OPLOG_MAGIC_SEQ_ID:
                readOplogMagicSeqRecord(dis, OPLOG_TYPE.IF);
                break;
            case DiskInitFile.IFREC_PR_CREATE:
                {
                    String name = dis.readUTF();
                    int numBuckets = dis.readInt();
                    String colocatedWith = dis.readUTF();
                    readEndOfRecord(dis);
                    PRPersistentConfig config = new PRPersistentConfig(numBuckets, colocatedWith);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_PR_CREATE name={}, config={}", name, config);
                    }
                    interpreter.cmnPRCreate(name, config);
                }
                break;
            case DiskInitFile.IFREC_GEMFIRE_VERSION:
                {
                    short ver = Version.readOrdinal(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_GEMFIRE_VERSION version={}", ver);
                    }
                    try {
                        gfversion = Version.fromOrdinal(ver, false);
                    } catch (UnsupportedVersionException e) {
                        throw new DiskAccessException(LocalizedStrings.Oplog_UNEXPECTED_PRODUCT_VERSION_0.toLocalizedString(ver), e, this.interpreter.getNameForError());
                    }
                    interpreter.cmnGemfireVersion(gfversion);
                    break;
                }
            case DiskInitFile.IFREC_PR_DESTROY:
                {
                    String name = dis.readUTF();
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_PR_DESTROY name={}", name);
                    }
                    interpreter.cmnPRDestroy(name);
                }
                break;
            case DiskInitFile.IFREC_ADD_CANONICAL_MEMBER_ID:
                {
                    int id = dis.readInt();
                    Object object = DataSerializer.readObject(dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_ADD_CANONICAL_MEMBER_ID id={} name={}", id, object);
                    }
                    interpreter.cmnAddCanonicalMemberId(id, object);
                    break;
                }
            case DiskInitFile.IFREC_REVOKE_DISK_STORE_ID:
                {
                    PersistentMemberPattern pattern = new PersistentMemberPattern();
                    InternalDataSerializer.invokeFromData(pattern, dis);
                    readEndOfRecord(dis);
                    if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                        logger.trace(LogMarker.PERSIST_RECOVERY, "IFREC_REVOKE_DISK_STORE_ID id={}", pattern);
                    }
                    interpreter.cmnRevokeDiskStoreId(pattern);
                }
                break;
            default:
                throw new DiskAccessException(LocalizedStrings.DiskInitFile_UNKNOWN_OPCODE_0_FOUND.toLocalizedString(opCode), this.interpreter.getNameForError());
        }
        if (interpreter.isClosing()) {
            break;
        }
    }
    return result;
}
Also used : EnumSet(java.util.EnumSet) RegionVersionHolder(org.apache.geode.internal.cache.versions.RegionVersionHolder) DiskRegionFlag(org.apache.geode.internal.cache.DiskInitFile.DiskRegionFlag) Version(org.apache.geode.internal.Version) DiskAccessException(org.apache.geode.cache.DiskAccessException) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) UnsupportedVersionException(org.apache.geode.cache.UnsupportedVersionException)
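
Example 74 is a straightforward record-oriented parser: read one opcode byte, read that record's fields, confirm the end-of-record marker, hand the result to an interpreter, and treat any unrecognized opcode as corruption by throwing DiskAccessException. The stripped-down sketch below keeps only that loop shape; the record layout and opcodes are made up for illustration, and only the (message, disk store name) DiskAccessException constructor mirrors the example.

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import org.apache.geode.cache.DiskAccessException;

class TinyInitFileParser {
    private static final byte REC_EOF = 0;            // hypothetical opcodes
    private static final byte REC_CREATE_REGION = 1;

    // Parses records until an EOF record or the physical end of the stream.
    void parse(DataInputStream dis, String diskStoreName) throws IOException {
        while (true) {
            byte opCode;
            try {
                opCode = dis.readByte();
            } catch (EOFException e) {
                return;                                // physical end of file
            }
            switch (opCode) {
                case REC_EOF:
                    return;                            // logical end-of-file marker
                case REC_CREATE_REGION: {
                    long drId = dis.readLong();
                    String regionName = dis.readUTF();
                    // interpreter.cmnCreateRegion(drId, regionName) would go here
                    break;
                }
                default:
                    // An unknown opcode means the init file is corrupt or written by a newer version.
                    throw new DiskAccessException(
                        "Unknown opcode " + opCode + " found in init file", diskStoreName);
            }
        }
    }
}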

Example 75 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class PersistentRVVRecoveryDUnitTest method testWriteCorrectVersionToKrf.

/**
   * Test that when we generate a krf, we write the version tag that matches the entry in the crf.
   */
@Test
public void testWriteCorrectVersionToKrf() throws Throwable {
    Host host = Host.getHost(0);
    final VM vm0 = host.getVM(1);
    final LocalRegion region = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
    // The idea here is to do a bunch of puts with async persistence
    // At some point the oplog will switch. At that time, we wait for a krf
    // to be created and then throw an exception to shutdown the disk store.
    //
    // This should cause us to create a krf with some entries that have been
    // modified since the crf was written and are still in the async queue.
    //
    // To avoid deadlocks, we need to mark that the oplog was switched,
    // and then do the wait in the flusher thread.
    // Setup the callbacks to wait for krf creation and throw an exception
    IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
    try {
        final CountDownLatch krfCreated = new CountDownLatch(1);
        final AtomicBoolean oplogSwitched = new AtomicBoolean(false);
        CacheObserverHolder.setInstance(new CacheObserverAdapter() {

            @Override
            public void afterKrfCreated() {
                krfCreated.countDown();
            }

            @Override
            public void afterWritingBytes() {
                if (oplogSwitched.get()) {
                    try {
                        if (!krfCreated.await(30, TimeUnit.SECONDS)) {
                            fail("KRF was not created in 30 seconds!");
                        }
                    } catch (InterruptedException e) {
                        fail("interrupted");
                    }
                    // Force a failure
                    throw new DiskAccessException();
                }
            }

            @Override
            public void afterSwitchingOplog() {
                oplogSwitched.set(true);
            }
        });
        // This is just to make sure the first oplog is not completely garbage.
        region.put("testkey", "key");
        // Do some puts to trigger an oplog roll.
        try {
            // Starting with a value of 1 means the value should match
            // the region version for easier debugging.
            int i = 1;
            while (krfCreated.getCount() > 0) {
                i++;
                region.put("key" + (i % 3), i);
                Thread.sleep(2);
            }
        } catch (CacheClosedException | DiskAccessException expected) {
        // do nothing
        }
        // Wait for the region to be destroyed. The region won't be destroyed
        // until the async flusher thread ends up switching oplogs
        Wait.waitForCriterion(new WaitCriterion() {

            @Override
            public boolean done() {
                return region.isDestroyed();
            }

            @Override
            public String description() {
                return "Region was not destroyed : " + region.isDestroyed();
            }
        }, 3000 * 1000, 100, true);
        closeCache();
    } finally {
        ex.remove();
        LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        CacheObserverHolder.setInstance(null);
    }
    // Get the version tags from the krf
    LocalRegion recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
    VersionTag[] tagsFromKrf = new VersionTag[3];
    for (int i = 0; i < 3; i++) {
        NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
        tagsFromKrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
        LogWriterUtils.getLogWriter().info("krfTag[" + i + "]=" + tagsFromKrf[i] + ",value=" + entry.getValue());
    }
    closeCache();
    // Set a system property to skip recovering from the krf so we can
    // get the version tag from the crf.
    System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "true");
    try {
        // Get the version tags from the crf
        recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
        VersionTag[] tagsFromCrf = new VersionTag[3];
        for (int i = 0; i < 3; i++) {
            NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
            tagsFromCrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
            LogWriterUtils.getLogWriter().info("crfTag[" + i + "]=" + tagsFromCrf[i] + ",value=" + entry.getValue());
        }
        // Make sure the version tags from the krf and the crf match.
        for (int i = 0; i < 3; i++) {
            assertEquals(tagsFromCrf[i], tagsFromKrf[i]);
        }
    } finally {
        System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");
    }
}
Also used : NonTXEntry(org.apache.geode.internal.cache.LocalRegion.NonTXEntry) Host(org.apache.geode.test.dunit.Host) LocalRegion(org.apache.geode.internal.cache.LocalRegion) CacheClosedException(org.apache.geode.cache.CacheClosedException) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) VM(org.apache.geode.test.dunit.VM) CacheObserverAdapter(org.apache.geode.internal.cache.CacheObserverAdapter) DiskAccessException(org.apache.geode.cache.DiskAccessException) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) IgnoredException(org.apache.geode.test.dunit.IgnoredException) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
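
Example 75 relies on a CacheObserver callback plus a CountDownLatch to inject a DiskAccessException at a precise moment (right after the oplog switch, once the KRF exists), which is what forces the disk store offline mid-test. The coordination idiom itself is plain java.util.concurrent; here is a minimal sketch of it outside Geode's observer API, with FailureHook and FailureInjectionSketch as invented names and the no-argument DiskAccessException constructor taken from the test above.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.geode.cache.DiskAccessException;

class FailureInjectionSketch {
    interface FailureHook { void afterStep(); }        // hypothetical observer hook

    static void runWithInjectedFailure() {
        final CountDownLatch checkpointReached = new CountDownLatch(1);

        // The hook waits for the checkpoint and then simulates a disk failure,
        // mirroring afterWritingBytes() in the test above.
        FailureHook hook = () -> {
            try {
                if (!checkpointReached.await(30, TimeUnit.SECONDS)) {
                    throw new IllegalStateException("checkpoint was never reached");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
            throw new DiskAccessException();           // forced failure, as in the test
        };

        // Elsewhere, the code under test signals the checkpoint...
        checkpointReached.countDown();
        // ...and the next hook invocation blows up with DiskAccessException.
        try {
            hook.afterStep();
        } catch (DiskAccessException expected) {
            // the real test ignores this and then waits for the region to be destroyed
        }
    }
}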

Aggregations

DiskAccessException (org.apache.geode.cache.DiskAccessException): 76
IOException (java.io.IOException): 44
InterruptedIOException (java.io.InterruptedIOException): 17
StoredObject (org.apache.geode.internal.offheap.StoredObject): 13
HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 11
ByteBuffer (java.nio.ByteBuffer): 9
Test (org.junit.Test): 8
Version (org.apache.geode.internal.Version): 6
File (java.io.File): 5
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 5
IndexManager (org.apache.geode.cache.query.internal.index.IndexManager): 5
UninterruptibleFileChannel (org.apache.geode.internal.cache.persistence.UninterruptibleFileChannel): 5
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 5
Released (org.apache.geode.internal.offheap.annotations.Released): 5
BufferedInputStream (java.io.BufferedInputStream): 4
FileInputStream (java.io.FileInputStream): 4
CancelException (org.apache.geode.CancelException): 4
BytesAndBits (org.apache.geode.internal.cache.persistence.BytesAndBits): 4
UninterruptibleRandomAccessFile (org.apache.geode.internal.cache.persistence.UninterruptibleRandomAccessFile): 4
EOFException (java.io.EOFException): 3