
Example 6 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class AbstractRegionMap method txApplyPut.

public void txApplyPut(Operation p_putOp, Object key, Object nv, boolean didDestroy, TransactionId txId, TXRmtEvent txEvent, EventID eventId, Object aCallbackArgument, List<EntryEventImpl> pendingCallbacks, FilterRoutingInfo filterRoutingInfo, ClientProxyMembershipID bridgeContext, TXEntryState txEntryState, VersionTag versionTag, long tailKey) {
    final LocalRegion owner = _getOwner();
    if (owner == null) {
        // "fix" for bug 32440
        Assert.assertTrue(false, "The owner for RegionMap " + this + " is null");
    }
    Operation putOp = p_putOp;
    Object newValue = nv;
    final boolean hasRemoteOrigin = !((TXId) txId).getMemberId().equals(owner.getMyId());
    final boolean isTXHost = txEntryState != null;
    final boolean isClientTXOriginator = owner.cache.isClient() && !hasRemoteOrigin;
    final boolean isRegionReady = owner.isInitialized();
    @Released EntryEventImpl cbEvent = null;
    boolean invokeCallbacks = shouldCreateCBEvent(owner, isRegionReady);
    boolean cbEventInPending = false;
    cbEvent = createCBEvent(owner, putOp, key, newValue, txId, txEvent, eventId, aCallbackArgument, filterRoutingInfo, bridgeContext, txEntryState, versionTag, tailKey);
    try {
        if (logger.isDebugEnabled()) {
            logger.debug("txApplyPut cbEvent={}", cbEvent);
        }
        if (owner.isUsedForPartitionedRegionBucket()) {
            newValue = EntryEventImpl.getCachedDeserializable(nv, cbEvent);
            txHandleWANEvent(owner, cbEvent, txEntryState);
        }
        boolean opCompleted = false;
        // Fix for Bug #44431. We do NOT want to update the region and wait
        // later for index INIT as region.clear() can cause inconsistency if
        // happened in parallel as it also does index INIT.
        IndexManager oqlIndexManager = owner.getIndexManager();
        if (oqlIndexManager != null) {
            oqlIndexManager.waitForIndexInit();
        }
        try {
            if (hasRemoteOrigin && !isTXHost && !isClientTXOriginator) {
                // Only apply the update to entries that already exist; otherwise fall
                // through to the standard create/update logic below
                if (!owner.isAllEvents() || (!putOp.isCreate() && isRegionReady)) {
                    // At this point we should only apply the update if the entry exists
                    // Fix for bug 32347.
                    RegionEntry re = getEntry(key);
                    if (re != null) {
                        synchronized (re) {
                            if (!re.isRemoved()) {
                                opCompleted = true;
                                putOp = putOp.getCorrespondingUpdateOp();
                                // Net writers are not called for received transaction data
                                final int oldSize = owner.calculateRegionEntryValueSize(re);
                                if (cbEvent != null) {
                                    cbEvent.setRegionEntry(re);
                                    // OFFHEAP eei
                                    cbEvent.setOldValue(re.getValueInVM(owner));
                                }
                                boolean clearOccured = false;
                                // Set RegionEntry updateInProgress
                                if (owner.indexMaintenanceSynchronous) {
                                    re.setUpdateInProgress(true);
                                }
                                try {
                                    txRemoveOldIndexEntry(putOp, re);
                                    if (didDestroy) {
                                        re.txDidDestroy(owner.cacheTimeMillis());
                                    }
                                    if (txEvent != null) {
                                        txEvent.addPut(putOp, owner, re, re.getKey(), newValue, aCallbackArgument);
                                    }
                                    re.setValueResultOfSearch(putOp.isNetSearch());
                                    try {
                                        processAndGenerateTXVersionTag(owner, cbEvent, re, txEntryState);
                                        {
                                            re.setValue(owner, re.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
                                        }
                                        if (putOp.isCreate()) {
                                            owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(re));
                                        } else if (putOp.isUpdate()) {
                                            // Rahul: fix for 41694. A negative bucket size can also be an issue with
                                            // normal GFE delta and will have to be fixed in a similar manner; maybe
                                            // this fix and the one for the other delta can be combined.
                                            {
                                                owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(re));
                                            }
                                        }
                                    } catch (RegionClearedException rce) {
                                        clearOccured = true;
                                    }
                                    {
                                        long lastMod = owner.cacheTimeMillis();
                                        EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                                        re.updateStatsForPut(lastMod, lastMod);
                                        owner.txApplyPutPart2(re, re.getKey(), lastMod, false, didDestroy, clearOccured);
                                    }
                                } finally {
                                    if (re != null && owner.indexMaintenanceSynchronous) {
                                        re.setUpdateInProgress(false);
                                    }
                                }
                                if (invokeCallbacks) {
                                    cbEvent.makeUpdate();
                                    switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                    if (pendingCallbacks == null) {
                                        owner.invokeTXCallbacks(EnumListenerEvent.AFTER_UPDATE, cbEvent, hasRemoteOrigin);
                                    } else {
                                        pendingCallbacks.add(cbEvent);
                                        cbEventInPending = true;
                                    }
                                }
                                if (!clearOccured) {
                                    lruEntryUpdate(re);
                                }
                            }
                        }
                        if (didDestroy && !opCompleted) {
                            owner.txApplyInvalidatePart2(re, re.getKey(), true, false);
                        }
                    }
                    if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                        txEntryState.setVersionTag(cbEvent.getVersionTag());
                    }
                    return;
                }
            }
            RegionEntry newRe = getEntryFactory().createEntry(owner, key, Token.REMOVED_PHASE1);
            synchronized (newRe) {
                try {
                    RegionEntry oldRe = putEntryIfAbsent(key, newRe);
                    while (!opCompleted && oldRe != null) {
                        synchronized (oldRe) {
                            if (oldRe.isRemovedPhase2()) {
                                owner.getCachePerfStats().incRetries();
                                _getMap().remove(key, oldRe);
                                oldRe = putEntryIfAbsent(key, newRe);
                            } else {
                                opCompleted = true;
                                if (!oldRe.isRemoved()) {
                                    putOp = putOp.getCorrespondingUpdateOp();
                                }
                                // Net writers are not called for received transaction data
                                final int oldSize = owner.calculateRegionEntryValueSize(oldRe);
                                final boolean oldIsRemoved = oldRe.isDestroyedOrRemoved();
                                if (cbEvent != null) {
                                    cbEvent.setRegionEntry(oldRe);
                                    // OFFHEAP eei
                                    cbEvent.setOldValue(oldRe.getValueInVM(owner));
                                }
                                boolean clearOccured = false;
                                // Set RegionEntry updateInProgress
                                if (owner.indexMaintenanceSynchronous) {
                                    oldRe.setUpdateInProgress(true);
                                }
                                try {
                                    txRemoveOldIndexEntry(putOp, oldRe);
                                    if (didDestroy) {
                                        oldRe.txDidDestroy(owner.cacheTimeMillis());
                                    }
                                    if (txEvent != null) {
                                        txEvent.addPut(putOp, owner, oldRe, oldRe.getKey(), newValue, aCallbackArgument);
                                    }
                                    oldRe.setValueResultOfSearch(putOp.isNetSearch());
                                    try {
                                        processAndGenerateTXVersionTag(owner, cbEvent, oldRe, txEntryState);
                                        boolean wasTombstone = oldRe.isTombstone();
                                        {
                                            oldRe.setValue(owner, oldRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
                                            if (wasTombstone) {
                                                owner.unscheduleTombstone(oldRe);
                                            }
                                        }
                                        if (putOp.isCreate()) {
                                            owner.updateSizeOnCreate(key, owner.calculateRegionEntryValueSize(oldRe));
                                        } else if (putOp.isUpdate()) {
                                            // Rahul: fix for 41694. A negative bucket size can also be an issue with
                                            // normal GFE delta and will have to be fixed in a similar manner; maybe
                                            // this fix and the one for the other delta can be combined.
                                            {
                                                owner.updateSizeOnPut(key, oldSize, owner.calculateRegionEntryValueSize(oldRe));
                                            }
                                        }
                                    } catch (RegionClearedException rce) {
                                        clearOccured = true;
                                    }
                                    {
                                        long lastMod = owner.cacheTimeMillis();
                                        EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                                        oldRe.updateStatsForPut(lastMod, lastMod);
                                        owner.txApplyPutPart2(oldRe, oldRe.getKey(), lastMod, false, didDestroy, clearOccured);
                                    }
                                } finally {
                                    if (oldRe != null && owner.indexMaintenanceSynchronous) {
                                        oldRe.setUpdateInProgress(false);
                                    }
                                }
                                if (invokeCallbacks) {
                                    if (!oldIsRemoved) {
                                        cbEvent.makeUpdate();
                                    }
                                    switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                                    if (pendingCallbacks == null) {
                                        owner.invokeTXCallbacks(cbEvent.op.isCreate() ? EnumListenerEvent.AFTER_CREATE : EnumListenerEvent.AFTER_UPDATE, cbEvent, true);
                                    } else {
                                        pendingCallbacks.add(cbEvent);
                                        cbEventInPending = true;
                                    }
                                }
                                if (!clearOccured) {
                                    lruEntryUpdate(oldRe);
                                }
                            }
                        }
                    }
                    if (!opCompleted) {
                        putOp = putOp.getCorrespondingCreateOp();
                        if (cbEvent != null) {
                            cbEvent.setRegionEntry(newRe);
                            cbEvent.setOldValue(null);
                        }
                        boolean clearOccured = false;
                        // Set RegionEntry updateInProgress
                        if (owner.indexMaintenanceSynchronous) {
                            newRe.setUpdateInProgress(true);
                        }
                        try {
                            txRemoveOldIndexEntry(putOp, newRe);
                            // creating a new entry
                            if (didDestroy) {
                                newRe.txDidDestroy(owner.cacheTimeMillis());
                            }
                            if (txEvent != null) {
                                txEvent.addPut(putOp, owner, newRe, newRe.getKey(), newValue, aCallbackArgument);
                            }
                            newRe.setValueResultOfSearch(putOp.isNetSearch());
                            try {
                                processAndGenerateTXVersionTag(owner, cbEvent, newRe, txEntryState);
                                {
                                    newRe.setValue(owner, newRe.prepareValueForCache(owner, newValue, cbEvent, !putOp.isCreate()));
                                }
                                owner.updateSizeOnCreate(newRe.getKey(), owner.calculateRegionEntryValueSize(newRe));
                            } catch (RegionClearedException rce) {
                                clearOccured = true;
                            }
                            {
                                long lastMod = owner.cacheTimeMillis();
                                EntryLogger.logTXPut(_getOwnerObject(), key, nv);
                                newRe.updateStatsForPut(lastMod, lastMod);
                                owner.txApplyPutPart2(newRe, newRe.getKey(), lastMod, true, didDestroy, clearOccured);
                            }
                        } finally {
                            if (newRe != null && owner.indexMaintenanceSynchronous) {
                                newRe.setUpdateInProgress(false);
                            }
                        }
                        opCompleted = true;
                        if (invokeCallbacks) {
                            cbEvent.makeCreate();
                            cbEvent.setOldValue(null);
                            switchEventOwnerAndOriginRemote(cbEvent, hasRemoteOrigin);
                            if (pendingCallbacks == null) {
                                owner.invokeTXCallbacks(EnumListenerEvent.AFTER_CREATE, cbEvent, true);
                            } else {
                                pendingCallbacks.add(cbEvent);
                                cbEventInPending = true;
                            }
                        }
                        if (!clearOccured) {
                            lruEntryCreate(newRe);
                            incEntryCount(1);
                        }
                    }
                } finally {
                    if (!opCompleted) {
                        removeEntry(key, newRe, false);
                    }
                }
            }
            if (owner.concurrencyChecksEnabled && txEntryState != null && cbEvent != null) {
                txEntryState.setVersionTag(cbEvent.getVersionTag());
            }
        } catch (DiskAccessException dae) {
            owner.handleDiskAccessException(dae);
            throw dae;
        } finally {
            if (oqlIndexManager != null) {
                oqlIndexManager.countDownIndexUpdaters();
            }
        }
    } finally {
        if (!cbEventInPending)
            cbEvent.release();
    }
}
Also used : IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) Released(org.apache.geode.internal.offheap.annotations.Released) DiskAccessException(org.apache.geode.cache.DiskAccessException) StoredObject(org.apache.geode.internal.offheap.StoredObject) Operation(org.apache.geode.cache.Operation)
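
Both txApplyPut above and basicPut below wrap the region-map mutation in the same guard: wait for index initialization first (the fix for bug #44431), hand any DiskAccessException to the owning region before rethrowing it, and always count down the index updaters. A minimal sketch of that skeleton, assuming the Geode internals shown above (LocalRegion, IndexManager) and with applyMutation standing in for whatever entry create or update the caller performs:

import org.apache.geode.cache.DiskAccessException;
import org.apache.geode.cache.query.internal.index.IndexManager;
import org.apache.geode.internal.cache.LocalRegion;

class TxApplyGuardSketch {
    // Sketch of the guard skeleton shared by txApplyPut and basicPut.
    void guardedMutation(LocalRegion owner, Runnable applyMutation) {
        IndexManager oqlIndexManager = owner.getIndexManager();
        if (oqlIndexManager != null) {
            // don't race a concurrent region.clear(), which also initializes indexes
            oqlIndexManager.waitForIndexInit();
        }
        try {
            applyMutation.run(); // placeholder for the actual entry create/update
        } catch (DiskAccessException dae) {
            owner.handleDiskAccessException(dae); // let the owning region react first
            throw dae;                            // then propagate to the caller
        } finally {
            if (oqlIndexManager != null) {
                oqlIndexManager.countDownIndexUpdaters();
            }
        }
    }
}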

Example 7 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class AbstractRegionMap method basicPut.

/*
   * returns null if the operation fails
   */
public RegionEntry basicPut(EntryEventImpl event, final long lastModified, final boolean ifNew, final boolean ifOld, Object expectedOldValue, // only non-null if ifOld
        boolean requireOldValue, final boolean overwriteDestroyed) throws CacheWriterException, TimeoutException {
    final LocalRegion owner = _getOwner();
    boolean clearOccured = false;
    if (owner == null) {
        // "fix" for bug 32440
        Assert.assertTrue(false, "The owner for RegionMap " + this + " is null for event " + event);
    }
    if (logger.isTraceEnabled(LogMarker.LRU_TOMBSTONE_COUNT) && !(owner instanceof HARegion)) {
        logger.trace(LogMarker.LRU_TOMBSTONE_COUNT, "ARM.basicPut called for {} expectedOldValue={} requireOldValue={} ifNew={} ifOld={} initialized={} overwriteDestroyed={}", event, expectedOldValue, requireOldValue, ifNew, ifOld, owner.isInitialized(), overwriteDestroyed);
    }
    RegionEntry result = null;
    long lastModifiedTime = 0;
    // copy into local var to prevent race condition with setter
    final CacheWriter cacheWriter = owner.basicGetWriter();
    final boolean cacheWrite = !event.isOriginRemote() && !event.isNetSearch() && event.isGenerateCallbacks() && (cacheWriter != null || owner.hasServerProxy() || owner.scope.isDistributed());
    /*
     * For performance reason, we try to minimize object creation and do as much work as we can
     * outside of synchronization, especially getting distribution advice.
     */
    final Set netWriteRecipients;
    if (cacheWrite) {
        if (cacheWriter == null && owner.scope.isDistributed()) {
            netWriteRecipients = ((DistributedRegion) owner).getCacheDistributionAdvisor().adviseNetWrite();
        } else {
            netWriteRecipients = null;
        }
    } else {
        netWriteRecipients = null;
    }
    // mbid: this has been added to maintain consistency between the disk region
    // and the region map after clear() has been called. It sets the reference of
    // the disk region segment as a ThreadLocal so that if the segment is later
    // changed by another thread, we can react accordingly.
    boolean uninitialized = !owner.isInitialized();
    boolean retrieveOldValueForDelta = event.getDeltaBytes() != null && event.getRawNewValue() == null;
    IndexManager oqlIndexManager = null;
    lockForCacheModification(owner, event);
    try {
        try {
            // Fix for Bug #44431. We do NOT want to update the region and wait
            // later for index INIT as region.clear() can cause inconsistency if
            // happened in parallel as it also does index INIT.
            oqlIndexManager = owner.getIndexManager();
            if (oqlIndexManager != null) {
                oqlIndexManager.waitForIndexInit();
            }
            // fix for bug #42169, replace must go to server if entry not on client
            boolean replaceOnClient = event.getOperation() == Operation.REPLACE && owner.getServerProxy() != null;
            // Rather than having two different blocks for synchronizing oldRe
            // and newRe, have only one block and synchronize re
            RegionEntry re = null;
            boolean eventRecorded = false;
            boolean onlyExisting = ifOld && !replaceOnClient;
            re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
            if (re == null) {
                return null;
            }
            while (true) {
                synchronized (re) {
                    // if the entry has moved to removed phase 2 it is being taken out of the map;
                    // otherwise we can keep this entry and change its state
                    if (re.isRemovedPhase2()) {
                        _getOwner().getCachePerfStats().incRetries();
                        _getMap().remove(event.getKey(), re);
                        re = getOrCreateRegionEntry(owner, event, Token.REMOVED_PHASE1, null, onlyExisting, false);
                        if (re == null) {
                            // this will happen when onlyExisting is true
                            return null;
                        }
                        continue;
                    } else {
                        @Released Object oldValueForDelta = null;
                        if (retrieveOldValueForDelta) {
                            // defer the lruUpdateCallback to prevent a deadlock (see bug 51121).
                            final boolean disabled = disableLruUpdateCallback();
                            try {
                                // Old value is faulted in from disk if not found in memory.
                                // OFFHEAP: no issue while we hold the sync on re, since we can use the entry's reference.
                                oldValueForDelta = re.getValue(owner);
                            } finally {
                                if (disabled) {
                                    enableLruUpdateCallback();
                                }
                            }
                        }
                        try {
                            event.setRegionEntry(re);
                            // set old value in event
                            setOldValueInEvent(event, re, cacheWrite, requireOldValue);
                            if (!continueUpdate(re, event, ifOld, replaceOnClient)) {
                                return null;
                            }
                            // overwrite destroyed?
                            if (!continueOverwriteDestroyed(re, event, overwriteDestroyed, ifNew)) {
                                return null;
                            }
                            // check expectedOldValue
                            if (!satisfiesExpectedOldValue(event, re, expectedOldValue, replaceOnClient)) {
                                return null;
                            }
                            // invoke cacheWriter
                            invokeCacheWriter(re, event, cacheWrite, cacheWriter, netWriteRecipients, requireOldValue, expectedOldValue, replaceOnClient);
                            // notify index of an update
                            notifyIndex(re, true);
                            try {
                                try {
                                    // if there is a cache writer the event type has already been set
                                    if ((cacheWrite && event.getOperation().isUpdate())
                                            || !re.isRemoved() || replaceOnClient) {
                                        // update
                                        updateEntry(event, requireOldValue, oldValueForDelta, re);
                                    } else {
                                        // create
                                        createEntry(event, owner, re);
                                    }
                                    owner.recordEvent(event);
                                    eventRecorded = true;
                                } catch (RegionClearedException rce) {
                                    clearOccured = true;
                                    owner.recordEvent(event);
                                } catch (ConcurrentCacheModificationException ccme) {
                                    VersionTag tag = event.getVersionTag();
                                    if (tag != null && tag.isTimeStampUpdated()) {
                                        // Notify gateways of new time-stamp.
                                        owner.notifyTimestampsToGateways(event);
                                    }
                                    throw ccme;
                                }
                                if (uninitialized) {
                                    event.inhibitCacheListenerNotification(true);
                                }
                                updateLru(clearOccured, re, event);
                                lastModifiedTime = owner.basicPutPart2(event, re, !uninitialized, lastModifiedTime, clearOccured);
                            } finally {
                                notifyIndex(re, false);
                            }
                            result = re;
                            break;
                        } finally {
                            OffHeapHelper.release(oldValueForDelta);
                            if (re != null && !onlyExisting && !isOpComplete(re, event)) {
                                owner.cleanUpOnIncompleteOp(event, re);
                            } else if (re != null && owner.isUsedForPartitionedRegionBucket()) {
                                BucketRegion br = (BucketRegion) owner;
                                CachePerfStats stats = br.getPartitionedRegion().getCachePerfStats();
                            }
                        }
                    // try
                    }
                }
            // sync re
            }
        // end while
        } catch (DiskAccessException dae) {
            // Asif: it seems safe to destroy the region here, as there appears to be
            // no chance of deadlock during region destruction
            result = null;
            this._getOwner().handleDiskAccessException(dae);
            throw dae;
        } finally {
            if (oqlIndexManager != null) {
                oqlIndexManager.countDownIndexUpdaters();
            }
            if (result != null) {
                try {
                    // Note we do distribution after releasing all sync to avoid deadlock
                    final boolean invokeListeners = event.basicGetNewValue() != Token.TOMBSTONE;
                    owner.basicPutPart3(event, result, !uninitialized, lastModifiedTime, invokeListeners, ifNew, ifOld, expectedOldValue, requireOldValue);
                } finally {
                    // for any recipients
                    if (!clearOccured) {
                        try {
                            lruUpdateCallback();
                        } catch (DiskAccessException dae) {
                            // Asif: it seems safe to destroy the region here, as there appears to be
                            // no chance of deadlock during region destruction
                            result = null;
                            this._getOwner().handleDiskAccessException(dae);
                            throw dae;
                        }
                    }
                }
            // finally
            } else {
                resetThreadLocals();
            }
        }
    } finally {
        releaseCacheModificationLock(owner, event);
    }
    return result;
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Released(org.apache.geode.internal.offheap.annotations.Released) ConcurrentCacheModificationException(org.apache.geode.internal.cache.versions.ConcurrentCacheModificationException) IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) CacheWriter(org.apache.geode.cache.CacheWriter) VersionTag(org.apache.geode.internal.cache.versions.VersionTag) DiskAccessException(org.apache.geode.cache.DiskAccessException) StoredObject(org.apache.geode.internal.offheap.StoredObject)
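
The cleanup path at the end of basicPut is worth isolating: distribution (owner.basicPutPart3) runs only after every entry monitor has been released, and the deferred LRU callback in the nested finally gets the same DiskAccessException treatment as the main body. A minimal sketch of that shape, assuming the same LocalRegion API and with doDistribution and doLruCallback standing in for owner.basicPutPart3(...) and lruUpdateCallback():

import org.apache.geode.cache.DiskAccessException;
import org.apache.geode.internal.cache.LocalRegion;

class BasicPutCleanupSketch {
    // Sketch of basicPut's post-put cleanup: distribute after all sync is released,
    // then run the deferred LRU callback, routing any DiskAccessException to the region.
    void finishPut(LocalRegion owner, boolean clearOccured,
                   Runnable doDistribution, Runnable doLruCallback) {
        try {
            // distribution happens only after all entry synchronization is released,
            // to avoid deadlocks with the recipients
            doDistribution.run();
        } finally {
            if (!clearOccured) {
                try {
                    doLruCallback.run();
                } catch (DiskAccessException dae) {
                    owner.handleDiskAccessException(dae);
                    throw dae;
                }
            }
        }
    }
}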

Example 8 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class OverflowOplog method basicGetForCompactor.

/**
   * Extracts the Value byte array & UserBit from the OpLog and sets them in the passed-in
   * wrapper object of type BytesAndBitsForCompactor.
   * 
   * @param offsetInOplog The starting position from which to read the data in the opLog
   * @param bitOnly boolean indicating whether only the user bit (and not the value) needs to be
   *        extracted
   * @param valueLength The length of the byte array which represents the value
   * @param userBits The userBits of the value.
   * @param wrapper Object of type BytesAndBitsForCompactor in which the data is set. The wrapper
   *        also carries the user bit associated with the entry
   * @return true if data is found, false if not
   */
private boolean basicGetForCompactor(long offsetInOplog, boolean bitOnly, int valueLength, byte userBits, BytesAndBitsForCompactor wrapper) {
    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits) || bitOnly || valueLength == 0) {
        if (EntryBits.isInvalid(userBits)) {
            wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
        } else if (EntryBits.isTombstone(userBits)) {
            wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
        } else {
            wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
        }
    } else {
        try {
            synchronized (this.crf) {
                // "!getParent().isSync() &&" is intentionally omitted here since the compactor groups writes
                if ((offsetInOplog + valueLength) > this.crf.bytesFlushed && !this.closed) {
                    // fix for bug 41205
                    flushAll();
                }
                final long writePosition = (this.doneAppending) ? this.crf.bytesFlushed : this.crf.raf.getFilePointer();
                if ((offsetInOplog + valueLength) > writePosition) {
                    throw new DiskAccessException(LocalizedStrings.Oplog_TRIED_TO_SEEK_TO_0_BUT_THE_FILE_LENGTH_IS_1_OPLOG_FILE_OBJECT_USED_FOR_READING_2.toLocalizedString(offsetInOplog + valueLength, writePosition, this.crf.raf), getParent().getName());
                } else if (offsetInOplog < 0) {
                    throw new DiskAccessException(LocalizedStrings.Oplog_CANNOT_FIND_RECORD_0_WHEN_READING_FROM_1.toLocalizedString(offsetInOplog, this.diskFile.getPath()), getParent().getName());
                }
                try {
                    this.crf.raf.seek(offsetInOplog);
                    this.stats.incOplogSeeks();
                    byte[] valueBytes = null;
                    if (wrapper.getBytes().length < valueLength) {
                        valueBytes = new byte[valueLength];
                        this.crf.raf.readFully(valueBytes);
                    } else {
                        valueBytes = wrapper.getBytes();
                        this.crf.raf.readFully(valueBytes, 0, valueLength);
                    }
                    this.stats.incOplogReads();
                    wrapper.setData(valueBytes, userBits, valueLength, true);
                } finally {
                    // if this oplog is no longer being appended to then don't waste disk io
                    if (!this.doneAppending) {
                        this.crf.raf.seek(writePosition);
                        this.stats.incOplogSeeks();
                    }
                // if (this.closed || this.deleted.get()) {
                // throw new DiskAccessException("attempting get on "
                // + (this.deleted.get() ? "destroyed" : "closed")
                // + " oplog #" + getOplogId(), this.owner);
                // }
                }
            }
        } catch (IOException ex) {
            throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOG_DETAILS_1_2_3_4_5_6.toLocalizedString(this.diskFile.getPath(), (long) this.oplogId, offsetInOplog, this.crf.currSize, this.crf.bytesFlushed, false, false), ex, getParent().getName());
        } catch (IllegalStateException ex) {
            checkClosed();
            throw ex;
        }
    }
    return true;
}
Also used : DiskAccessException(org.apache.geode.cache.DiskAccessException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)
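
The compactor read above validates every seek against what has actually been flushed, turning an out-of-range or negative offset into a DiskAccessException instead of a short read, and wraps any IOException the same way. A minimal standalone sketch of that bounds-checked read, assuming a plain RandomAccessFile in place of the oplog's crf bookkeeping and using the (message, regionName) and (message, cause, regionName) constructors shown above; the messages here are plain strings rather than the LocalizedStrings used in the source:

import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.geode.cache.DiskAccessException;

class OplogReadSketch {
    // Sketch: bounds-check an oplog read before seeking, then read the value bytes.
    byte[] readRecord(RandomAccessFile raf, long offsetInOplog, int valueLength,
                      long bytesFlushed, String regionName) {
        if (offsetInOplog + valueLength > bytesFlushed) {
            throw new DiskAccessException("Tried to seek to " + (offsetInOplog + valueLength)
                + " but only " + bytesFlushed + " bytes have been flushed", regionName);
        } else if (offsetInOplog < 0) {
            throw new DiskAccessException("Cannot find record at offset " + offsetInOplog, regionName);
        }
        try {
            raf.seek(offsetInOplog);
            byte[] valueBytes = new byte[valueLength];
            raf.readFully(valueBytes);
            return valueBytes;
        } catch (IOException ex) {
            throw new DiskAccessException("Failed reading oplog at offset " + offsetInOplog, ex, regionName);
        }
    }
}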

Example 9 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class OverflowOplog method basicGet.

/**
   * Extracts the Value byte array & UserBit from the OpLog
   * 
   * @param dr The disk region view used for cancellation checks and error reporting
   * @param offsetInOplog The starting position from which to read the data in the opLog
   * @param bitOnly boolean indicating whether only the user bit (and not the value) needs to be
   *        extracted
   * @param valueLength The length of the byte array which represents the value
   * @param userBits The userBits of the value.
   * @return BytesAndBits object which wraps the extracted value & user bit
   */
private BytesAndBits basicGet(DiskRegionView dr, long offsetInOplog, boolean bitOnly, int valueLength, byte userBits) {
    BytesAndBits bb = null;
    if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits) || bitOnly || valueLength == 0) {
        if (EntryBits.isInvalid(userBits)) {
            bb = new BytesAndBits(DiskEntry.INVALID_BYTES, userBits);
        } else if (EntryBits.isTombstone(userBits)) {
            bb = new BytesAndBits(DiskEntry.TOMBSTONE_BYTES, userBits);
        } else {
            bb = new BytesAndBits(DiskEntry.LOCAL_INVALID_BYTES, userBits);
        }
    } else {
        if (offsetInOplog == -1)
            return null;
        try {
            for (; ; ) {
                dr.getCancelCriterion().checkCancelInProgress(null);
                boolean interrupted = Thread.interrupted();
                try {
                    bb = attemptGet(dr, offsetInOplog, valueLength, userBits);
                    break;
                } catch (InterruptedIOException ignore) {
                // bug 39756
                // ignore, we'll clear and retry.
                } finally {
                    if (interrupted) {
                        Thread.currentThread().interrupt();
                    }
                }
            }
        // for
        } catch (IOException ex) {
            throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOGID_1_OFFSET_BEING_READ_2_CURRENT_OPLOG_SIZE_3_ACTUAL_FILE_SIZE_4_IS_ASYNCH_MODE_5_IS_ASYNCH_WRITER_ALIVE_6.toLocalizedString(this.diskFile.getPath(), (long) this.oplogId, offsetInOplog, this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), false), ex, dr.getName());
        } catch (IllegalStateException ex) {
            checkClosed();
            throw ex;
        }
    }
    return bb;
}
Also used : InterruptedIOException(java.io.InterruptedIOException) DiskAccessException(org.apache.geode.cache.DiskAccessException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) BytesAndBits(org.apache.geode.internal.cache.persistence.BytesAndBits)
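
basicGet retries the low-level read whenever it is cut short by a thread interrupt (the bug 39756 workaround), clearing the interrupt flag for the duration of each attempt and restoring it afterwards, and only converts a genuine IOException into a DiskAccessException. A minimal sketch of that loop, assuming a hypothetical ReadAttempt interface in place of attemptGet and omitting the per-iteration CancelCriterion check:

import java.io.IOException;
import java.io.InterruptedIOException;
import org.apache.geode.cache.DiskAccessException;

class InterruptTolerantReadSketch {
    interface ReadAttempt {
        byte[] run() throws IOException;
    }

    // Sketch of the interrupt-tolerant read loop: retry on InterruptedIOException,
    // restore the thread's interrupt status after each attempt.
    byte[] readWithRetry(ReadAttempt attempt, String regionName) {
        for (;;) {
            boolean interrupted = Thread.interrupted(); // clear and remember the flag
            try {
                return attempt.run();
            } catch (InterruptedIOException ignore) {
                // interrupted mid-read; fall through and retry
            } catch (IOException ex) {
                throw new DiskAccessException("Failed reading from oplog", ex, regionName);
            } finally {
                if (interrupted) {
                    Thread.currentThread().interrupt(); // restore the flag for the caller
                }
            }
        }
    }
}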

Example 10 with DiskAccessException

use of org.apache.geode.cache.DiskAccessException in project geode by apache.

the class DiskInitFile method writeIFRecord.

private void writeIFRecord(byte b, DiskRegionView dr, String s) {
    assert lock.isHeldByCurrentThread();
    try {
        int hdosSize = 1 + DR_ID_MAX_BYTES + estimateByteSize(s) + 1;
        if (hdosSize < 32) {
            hdosSize = 32;
        }
        HeapDataOutputStream hdos = new HeapDataOutputStream(hdosSize, Version.CURRENT);
        hdos.write(b);
        writeDiskRegionID(hdos, dr.getId());
        hdos.writeUTF(s);
        hdos.write(END_OF_RECORD_ID);
        writeIFRecord(hdos, true);
    } catch (IOException ex) {
        DiskAccessException dae = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent);
        if (!this.compactInProgress) {
            this.parent.handleDiskAccessException(dae);
        }
        throw dae;
    }
}
Also used : HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) DiskAccessException(org.apache.geode.cache.DiskAccessException) IOException(java.io.IOException)
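
writeIFRecord shows the write-side convention: an IOException from the init file is wrapped in a DiskAccessException, the owning disk store is notified unless a compaction is already in progress, and the wrapped exception is then thrown. A minimal sketch of that convention, using a hypothetical RecordWriter and a Consumer in place of parent.handleDiskAccessException, and the three-argument (message, cause, name) constructor from the oplog examples rather than the exact constructor used here:

import java.io.IOException;
import java.util.function.Consumer;
import org.apache.geode.cache.DiskAccessException;

class InitFileWriteSketch {
    interface RecordWriter {
        void run() throws IOException;
    }

    // Sketch: wrap the IOException, notify the disk store unless compacting, then rethrow.
    void writeRecord(RecordWriter doWrite, boolean compactInProgress,
                     Consumer<DiskAccessException> notifyOwner, String storeName) {
        try {
            doWrite.run();
        } catch (IOException ex) {
            DiskAccessException dae =
                new DiskAccessException("Failed writing to the init file", ex, storeName);
            if (!compactInProgress) {
                notifyOwner.accept(dae); // parent.handleDiskAccessException(dae) in the source
            }
            throw dae;
        }
    }
}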

Aggregations

DiskAccessException (org.apache.geode.cache.DiskAccessException): 76
IOException (java.io.IOException): 44
InterruptedIOException (java.io.InterruptedIOException): 17
StoredObject (org.apache.geode.internal.offheap.StoredObject): 13
HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 11
ByteBuffer (java.nio.ByteBuffer): 9
Test (org.junit.Test): 8
Version (org.apache.geode.internal.Version): 6
File (java.io.File): 5
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 5
IndexManager (org.apache.geode.cache.query.internal.index.IndexManager): 5
UninterruptibleFileChannel (org.apache.geode.internal.cache.persistence.UninterruptibleFileChannel): 5
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 5
Released (org.apache.geode.internal.offheap.annotations.Released): 5
BufferedInputStream (java.io.BufferedInputStream): 4
FileInputStream (java.io.FileInputStream): 4
CancelException (org.apache.geode.CancelException): 4
BytesAndBits (org.apache.geode.internal.cache.persistence.BytesAndBits): 4
UninterruptibleRandomAccessFile (org.apache.geode.internal.cache.persistence.UninterruptibleRandomAccessFile): 4
EOFException (java.io.EOFException): 3