Use of org.apache.geode.cache.DiskAccessException in project geode by apache.
Class Oplog, method modify.
/**
* Modifies a key/value pair of a region entry on disk. Updates all of the necessary
* {@linkplain DiskStoreStats statistics} and invokes basicModify.
* <p>
* The code reuses the already created ByteBuffer during transition and minimizes
* synchronization so that put operations for different entries can proceed concurrently
* in asynch mode.
*
* @param region the LocalRegion to which the entry belongs
* @param entry DiskEntry object representing the current entry
* @param value ValueWrapper wrapping the value to be written
* @param async true if the write is being performed in asynch mode
*/
public void modify(LocalRegion region, DiskEntry entry, ValueWrapper value, boolean async) {
if (getOplogSet().getChild() != this) {
getOplogSet().getChild().modify(region, entry, value, async);
} else {
DiskId did = entry.getDiskId();
boolean exceptionOccurred = false;
byte prevUsrBit = did.getUserBits();
int len = did.getValueLength();
try {
byte userBits = calcUserBits(value);
// 7.0
if (entry.getVersionStamp() != null) {
if (entry.getVersionStamp().getMemberID() == null) {
throw new AssertionError("Version stamp should have a member at this point for entry " + entry);
}
// pdx and tx will not use version
userBits = EntryBits.setWithVersions(userBits, true);
}
basicModify(region.getDiskRegion(), entry, value, userBits, async, false);
} catch (IOException ex) {
exceptionOccurred = true;
region.getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, region.getFullPath());
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
exceptionOccurred = true;
region.getCancelCriterion().checkCancelInProgress(ie);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, region.getFullPath());
} finally {
if (exceptionOccurred) {
did.setValueLength(len);
did.setUserBits(prevUsrBit);
}
}
}
}
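A DiskAccessException thrown from Oplog.modify propagates out of ordinary region operations as an unchecked exception. A minimal caller-side sketch, assuming a local persistent region; the region name, key, and value are illustrative only:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskAccessException;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskWriteExample {
  public static void main(String[] args) {
    Cache cache = new CacheFactory().create();
    try {
      // Persistent regions write each update through the oplog layer shown above.
      Region<String, String> region =
          cache.<String, String>createRegionFactory(RegionShortcut.LOCAL_PERSISTENT).create("orders");
      region.put("order-1", "pending");
    } catch (DiskAccessException dae) {
      // Raised when the underlying oplog write fails (as in Oplog.modify above);
      // Geode typically takes the disk store offline, so the member usually has to be restarted.
      System.err.println("Disk write failed: " + dae.getMessage());
    } finally {
      cache.close();
    }
  }
}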
Use of org.apache.geode.cache.DiskAccessException in project geode by apache.
Class Oplog, method offlineModify.
public void offlineModify(DiskRegionView drv, DiskEntry entry, byte[] value, boolean isSerializedObject) {
try {
ValueWrapper vw = new DiskEntry.Helper.ByteArrayValueWrapper(isSerializedObject, value);
byte userBits = calcUserBits(vw);
// save versions for creates and updates even if the value is a byte array (since 7.0)
VersionStamp vs = entry.getVersionStamp();
if (vs != null) {
if (vs.getMemberID() == null) {
throw new AssertionError("Version stamp should have a member at this point for entry " + entry);
}
// Since we are modifying this entry's value while offline make sure its version stamp
// has this disk store as its member id and bump the version
vs.setMemberID(getParent().getDiskStoreID());
VersionTag vt = vs.asVersionTag();
vt.setRegionVersion(drv.getRegionVersionVector().getNextVersion());
vt.setEntryVersion(vt.getEntryVersion() + 1);
vt.setVersionTimeStamp(System.currentTimeMillis());
vs.setVersions(vt);
userBits = EntryBits.setWithVersions(userBits, true);
}
basicModify(drv, entry, vw, userBits, false, false);
} catch (IOException ex) {
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, drv.getName());
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, drv.getName());
}
}
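The version bump above follows a fixed sequence: stamp this disk store as the modifying member, take the next region version from the region version vector, increment the entry version, and refresh the timestamp. A simplified sketch of that sequence, using hypothetical stand-in types rather than Geode's actual VersionStamp/VersionTag classes:

// Hypothetical stand-in type, for illustration only; not Geode's internal class.
final class SketchVersionTag {
  long memberId;
  long regionVersion;
  int entryVersion;
  long timestamp;
}

final class OfflineVersionBump {
  /**
   * Mirrors the sequence in offlineModify: the offline disk store becomes the modifying
   * member, the region version advances, the entry version increments, and the
   * timestamp is refreshed.
   */
  static SketchVersionTag bump(SketchVersionTag current, long diskStoreId, long nextRegionVersion) {
    SketchVersionTag next = new SketchVersionTag();
    next.memberId = diskStoreId;                  // this disk store authored the change
    next.regionVersion = nextRegionVersion;       // next version from the region version vector
    next.entryVersion = current.entryVersion + 1; // bump the per-entry version
    next.timestamp = System.currentTimeMillis();  // record when the offline modify happened
    return next;
  }
}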
Use of org.apache.geode.cache.DiskAccessException in project geode by apache.
Class Oplog, method basicGet.
/**
* Extracts the value byte array and user bits from the oplog.
*
* @param dr the DiskRegionView of the region being read
* @param offsetInOplog the starting position from which to read the data in the oplog
* @param bitOnly if true, only the user bits are needed and the value is not read from disk
* @param valueLength the length of the byte array which represents the value
* @param userBits the user bits of the value
* @return BytesAndBits object which wraps the extracted value and user bits, or null if the
* offset is -1
*/
private BytesAndBits basicGet(DiskRegionView dr, long offsetInOplog, boolean bitOnly, int valueLength, byte userBits) {
BytesAndBits bb = null;
if (EntryBits.isAnyInvalid(userBits) || EntryBits.isTombstone(userBits) || bitOnly || valueLength == 0) {
if (EntryBits.isInvalid(userBits)) {
bb = new BytesAndBits(DiskEntry.INVALID_BYTES, userBits);
} else if (EntryBits.isTombstone(userBits)) {
bb = new BytesAndBits(DiskEntry.TOMBSTONE_BYTES, userBits);
} else {
bb = new BytesAndBits(DiskEntry.LOCAL_INVALID_BYTES, userBits);
}
} else {
if (offsetInOplog == -1)
return null;
try {
for (; ; ) {
dr.getCancelCriterion().checkCancelInProgress(null);
boolean interrupted = Thread.interrupted();
try {
bb = attemptGet(dr, offsetInOplog, bitOnly, valueLength, userBits);
break;
} catch (InterruptedIOException ignore) {
// bug 39756
// ignore, we'll clear and retry.
} finally {
if (interrupted) {
Thread.currentThread().interrupt();
}
}
} // for
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FROM_0_OPLOGID_1_OFFSET_BEING_READ_2_CURRENT_OPLOG_SIZE_3_ACTUAL_FILE_SIZE_4_IS_ASYNCH_MODE_5_IS_ASYNCH_WRITER_ALIVE_6.toLocalizedString(this.diskFile.getPath(), this.oplogId, offsetInOplog, this.crf.currSize, this.crf.bytesFlushed, !dr.isSync(), Boolean.FALSE), ex, dr.getName());
} catch (IllegalStateException ex) {
checkClosed();
throw ex;
}
}
return bb;
}
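The retry loop in basicGet works around interrupted reads (bug 39756): the interrupt flag is cleared before each attempt so an InterruptedIOException does not immediately recur, and it is restored afterwards so the caller still observes the interrupt. A generic, self-contained sketch of the same pattern; the ReadAttempt interface is a hypothetical stand-in for the actual file read:

import java.io.IOException;
import java.io.InterruptedIOException;

public final class InterruptSafeRead {
  /**
   * Retries an interruptible read until it completes, mirroring the loop in basicGet above:
   * the interrupt status is cleared before each attempt and restored afterwards so callers
   * still see it.
   */
  static byte[] readFully(ReadAttempt attempt) throws IOException {
    for (;;) {
      boolean interrupted = Thread.interrupted(); // clears and remembers the flag
      try {
        return attempt.read();
      } catch (InterruptedIOException ignore) {
        // the read was cut short by an interrupt; loop and try again
      } finally {
        if (interrupted) {
          Thread.currentThread().interrupt(); // restore the flag for the caller
        }
      }
    }
  }

  /** Hypothetical functional interface standing in for the actual file read. */
  interface ReadAttempt {
    byte[] read() throws IOException;
  }
}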
Use of org.apache.geode.cache.DiskAccessException in project geode by apache.
Class Oplog, method getBytesAndBitsForCompaction.
/**
* Retrieves the value for an entry being compacted, provided the entry still references the
* oplog being compacted. An attempt is made to retrieve the value from memory if available,
* else from the asynch buffers (if asynch mode is enabled), else from the oplog being
* compacted. It is invoked from switchOplog as well as OplogCompactor's compact function.
*
* @param entry DiskEntry being compacted, referencing the oplog being compacted
* @param wrapper BytesAndBitsForCompactor into which the data, if found, is copied; the wrapper
* also carries the user bits associated with the entry
* @return false if the entry need not be compacted; true if the wrapper has been
* appropriately filled with data
*/
private boolean getBytesAndBitsForCompaction(DiskRegionView dr, DiskEntry entry, BytesAndBitsForCompactor wrapper) {
// caller is synced on did
DiskId did = entry.getDiskId();
byte userBits = 0;
long oplogOffset = did.getOffsetInOplog();
ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object value = entry._getValueRetain(dr, true);
ReferenceCountHelper.unskipRefCountTracking();
boolean foundData = false;
if (value == null) {
// If the mode is synch it is guaranteed to be present in the disk
foundData = basicGetForCompactor(dr, oplogOffset, false, did.getValueLength(), did.getUserBits(), wrapper);
// it is impossible for this oplogId to change.
if (did.getOplogId() != getOplogId()) {
// if it is not then no need to compact it
return false;
} else {
// then we should have found data
assert foundData : "compactor get failed on oplog#" + getOplogId();
}
userBits = wrapper.getBits();
if (EntryBits.isAnyInvalid(userBits)) {
if (EntryBits.isInvalid(userBits)) {
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else {
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
}
} else if (EntryBits.isTombstone(userBits)) {
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
} else {
foundData = true;
userBits = 0;
if (EntryBits.isRecoveredFromDisk(did.getUserBits())) {
userBits = EntryBits.setRecoveredFromDisk(userBits, true);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
// we (the compactor) are writing the value out to disk.
if (value == Token.INVALID) {
userBits = EntryBits.setInvalid(userBits, true);
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else if (value == Token.LOCAL_INVALID) {
userBits = EntryBits.setLocalInvalid(userBits, true);
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
} else if (value == Token.TOMBSTONE) {
userBits = EntryBits.setTombstone(userBits, true);
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
} else if (value instanceof CachedDeserializable) {
CachedDeserializable proxy = (CachedDeserializable) value;
if (proxy instanceof StoredObject) {
@Released StoredObject ohproxy = (StoredObject) proxy;
try {
ohproxy.fillSerializedValue(wrapper, userBits);
} finally {
OffHeapHelper.releaseWithNoTracking(ohproxy);
}
} else {
userBits = EntryBits.setSerialized(userBits, true);
proxy.fillSerializedValue(wrapper, userBits);
}
} else if (value instanceof byte[]) {
byte[] valueBytes = (byte[]) value;
// If the value is already a byte array then the user bits stay 0 (the default value of
// the userBits variable), indicating non-serialized data. The bytes are therefore used
// as-is and not deserialized into an Object.
wrapper.setData(valueBytes, userBits, valueBytes.length, false);
} else if (Token.isRemoved(value) && value != Token.TOMBSTONE) {
// TODO - RVV - We need to handle tombstones differently here!
if (entry.getDiskId().isPendingAsync()) {
entry.getDiskId().setPendingAsync(false);
try {
getOplogSet().getChild().basicRemove(dr, entry, false, false);
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, dr.getName());
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
getParent().getCache().getCancelCriterion().checkCancelInProgress(ie);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, dr.getName());
}
} else {
rmLive(dr, entry);
}
foundData = false;
} else {
userBits = EntryBits.setSerialized(userBits, true);
EntryEventImpl.fillSerializedValue(wrapper, value, userBits);
}
}
if (foundData) {
// since the compactor is writing it out clear the async flag
entry.getDiskId().setPendingAsync(false);
}
return foundData;
}
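The userBits byte composed above packs several independent flags (serialized, with-versions, recovered-from-disk, invalid, tombstone) into a single byte. A minimal sketch of the idea, assuming hypothetical bit positions; the actual positions used by Geode's EntryBits are internal and may differ:

final class UserBitsSketch {
  // Hypothetical bit positions, for illustration only.
  static final byte SERIALIZED = 0x1;
  static final byte WITH_VERSIONS = 0x2;
  static final byte RECOVERED_FROM_DISK = 0x4;

  static byte set(byte bits, byte flag, boolean on) {
    return on ? (byte) (bits | flag) : (byte) (bits & ~flag);
  }

  static boolean isSet(byte bits, byte flag) {
    return (bits & flag) != 0;
  }

  public static void main(String[] args) {
    byte userBits = 0;                               // plain byte[] value: no flags set
    userBits = set(userBits, WITH_VERSIONS, true);   // value carries a version tag
    userBits = set(userBits, SERIALIZED, true);      // value is a serialized object
    System.out.println(isSet(userBits, SERIALIZED)); // true
  }
}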
Use of org.apache.geode.cache.DiskAccessException in project geode by apache.
Class OverflowOplog, method getBytesAndBits.
/**
* Returns the unserialized bytes and bits for the given entry. If the oplog is destroyed while
* querying, the DiskRegion is queried again to obtain the value. This method should never be
* invoked for an entry which has been destroyed.
*
* @since GemFire 3.2.1
* @param dr the DiskRegionView of the region being read
* @param id the DiskId for the entry
* @param faultingIn
* @param bitOnly boolean indicating whether to extract just the user bits or the user bits
* along with the value
* @return BytesAndBits object wrapping the value and user bits
*/
public BytesAndBits getBytesAndBits(DiskRegionView dr, DiskId id, boolean faultingIn, boolean bitOnly) {
OverflowOplog retryOplog = null;
long offset = 0;
synchronized (id) {
int opId = (int) id.getOplogId();
if (opId != getOplogId()) {
// the oplog changed on us so we need to do a recursive
// call after unsyncing
retryOplog = this.getOplogSet().getChild(opId);
} else {
// fetch this while synced so it will be consistent with oplogId
offset = id.getOffsetInOplog();
}
}
if (retryOplog != null) {
return retryOplog.getBytesAndBits(dr, id, faultingIn, bitOnly);
}
BytesAndBits bb = null;
long start = this.stats.startRead();
// Even if the offset is -1, the data may still be present in the current oplog file.
if (offset == -1) {
// Since a get operation has already taken a lock on the entry, no put operation could have
// modified the oplog ID; therefore synchronization is not needed.
// synchronized (id) {
offset = id.getOffsetInOplog();
}
// If the oplog is still open we can retrieve the value from this oplog.
try {
bb = basicGet(dr, offset, bitOnly, id.getValueLength(), id.getUserBits());
} catch (DiskAccessException dae) {
logger.error(LocalizedMessage.create(LocalizedStrings.Oplog_OPLOGBASICGET_ERROR_IN_READING_THE_DATA_FROM_DISK_FOR_DISK_ID_HAVING_DATA_AS_0, id), dae);
throw dae;
}
if (bb == null) {
throw new EntryDestroyedException(LocalizedStrings.Oplog_NO_VALUE_WAS_FOUND_FOR_ENTRY_WITH_DISK_ID_0_ON_A_REGION_WITH_SYNCHRONOUS_WRITING_SET_TO_1.toLocalizedString(new Object[] { id, dr.isSync() }));
}
if (bitOnly) {
dr.endRead(start, this.stats.endRead(start, 1), 1);
} else {
dr.endRead(start, this.stats.endRead(start, bb.getBytes().length), bb.getBytes().length);
}
return bb;
}
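When no value is found, getBytesAndBits surfaces the condition as an EntryDestroyedException. On the public API the same exception can reach callers that read an entry while it is concurrently destroyed; a small caller-side sketch, with an illustrative region and key:

import org.apache.geode.cache.EntryDestroyedException;
import org.apache.geode.cache.Region;

public final class ReadEntryExample {
  /**
   * Reads the current value of an entry, treating a concurrently destroyed entry as absent.
   * Region.Entry.getValue may throw EntryDestroyedException if the entry is destroyed
   * between getEntry and getValue.
   */
  static String readOrNull(Region<String, String> region, String key) {
    Region.Entry<String, String> entry = region.getEntry(key);
    if (entry == null) {
      return null; // no such entry
    }
    try {
      return entry.getValue();
    } catch (EntryDestroyedException ede) {
      return null; // destroyed concurrently; treat as absent
    }
  }
}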