Use of org.apache.geode.internal.offheap.StoredObject in project geode by apache.
The class DataSerializer, method writeObjectAsByteArray.
/**
* Serializes the given object <code>obj</code> into a byte array using
* {@link #writeObject(Object, DataOutput)} and then writes the byte array to the given data
* output <code>out</code> in the same format {@link #writeByteArray(byte[], DataOutput)} does.
* This method will serialize a <code>null</code> obj and not throw a
* <code>NullPointerException</code>.
*
* @param obj the object to serialize and write
* @param out the data output to write the byte array to
* @throws IllegalArgumentException if a problem occurs while serializing <code>obj</code>
* @throws IOException if a problem occurs while writing to <code>out</code>
*
* @see #readByteArray
* @since GemFire 5.0.2
*/
public static void writeObjectAsByteArray(Object obj, DataOutput out) throws IOException {
Object object = obj;
if (obj instanceof CachedDeserializable) {
if (obj instanceof StoredObject) {
StoredObject so = (StoredObject) obj;
if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray StoredObject");
}
so.sendAsByteArray(out);
return;
} else {
object = ((CachedDeserializable) obj).getSerializedValue();
}
}
if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
if (object == null) {
logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray null");
} else {
logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray obj.getClass={}", object.getClass());
}
}
if (object instanceof byte[] || object == null) {
writeByteArray((byte[]) object, out);
} else if (out instanceof ObjToByteArraySerializer) {
((ObjToByteArraySerializer) out).writeAsSerializedByteArray(object);
} else /*
* else if (obj instanceof Sendable) { ((Sendable)obj).sendTo(out); }
*/
{
HeapDataOutputStream hdos;
if (object instanceof HeapDataOutputStream) {
hdos = (HeapDataOutputStream) object;
} else {
Version v = InternalDataSerializer.getVersionForDataStreamOrNull(out);
if (v == null) {
v = Version.CURRENT;
}
hdos = new HeapDataOutputStream(v);
try {
DataSerializer.writeObject(object, hdos);
} catch (IOException e) {
RuntimeException e2 = new IllegalArgumentException(LocalizedStrings.DataSerializer_PROBELM_WHILE_SERIALIZING.toLocalizedString());
e2.initCause(e);
throw e2;
}
}
InternalDataSerializer.writeArrayLength(hdos.size(), out);
hdos.sendTo(out);
}
}
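For context, here is a minimal round-trip sketch of this API, assuming geode-core is on the classpath; the class name is made up for illustration and the payload is a plain String written with writeObjectAsByteArray and read back with readByteArray plus readObject.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.geode.DataSerializer;

public class WriteObjectAsByteArrayExample {
  public static void main(String[] args) throws IOException, ClassNotFoundException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    // Serialize the value and write it as a length-prefixed byte array.
    DataSerializer.writeObjectAsByteArray("hello", out);
    out.flush();

    DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    // Read the byte array back, then deserialize the original value from it.
    byte[] serialized = DataSerializer.readByteArray(in);
    String value = DataSerializer.readObject(
        new DataInputStream(new ByteArrayInputStream(serialized)));
    System.out.println(value); // prints "hello"
  }
}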
Use of org.apache.geode.internal.offheap.StoredObject in project geode by apache.
The class Oplog, method getBytesAndBitsForCompaction.
/**
* This function retrieves the value for an entry being compacted, provided the entry still
* references the oplog being compacted. An attempt is made to retrieve the value from memory, if
* available, else from the asynch buffers (if asynch mode is enabled), else from the Oplog being
* compacted. It is invoked from switchOplog as well as from OplogCompactor's compact function.
*
* @param entry the DiskEntry being compacted, referencing the Oplog being compacted
* @param wrapper a BytesAndBitsForCompactor object. The data, if found, is set in the wrapper,
*        along with the user bits associated with the entry
* @return false if the entry need not be compacted; true if the wrapper has been appropriately
*         filled with data
*/
private boolean getBytesAndBitsForCompaction(DiskRegionView dr, DiskEntry entry, BytesAndBitsForCompactor wrapper) {
// caller is synced on did
DiskId did = entry.getDiskId();
byte userBits = 0;
long oplogOffset = did.getOffsetInOplog();
ReferenceCountHelper.skipRefCountTracking();
@Retained @Released Object value = entry._getValueRetain(dr, true);
ReferenceCountHelper.unskipRefCountTracking();
boolean foundData = false;
if (value == null) {
// If the mode is synch, the value is guaranteed to be present on disk
foundData = basicGetForCompactor(dr, oplogOffset, false, did.getValueLength(), did.getUserBits(), wrapper);
// it is impossible for this oplogId to change.
if (did.getOplogId() != getOplogId()) {
// if it is not then no need to compact it
return false;
} else {
// then we should have found data
assert foundData : "compactor get failed on oplog#" + getOplogId();
}
userBits = wrapper.getBits();
if (EntryBits.isAnyInvalid(userBits)) {
if (EntryBits.isInvalid(userBits)) {
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else {
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
}
} else if (EntryBits.isTombstone(userBits)) {
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
} else {
foundData = true;
userBits = 0;
if (EntryBits.isRecoveredFromDisk(did.getUserBits())) {
userBits = EntryBits.setRecoveredFromDisk(userBits, true);
}
if (EntryBits.isWithVersions(did.getUserBits())) {
userBits = EntryBits.setWithVersions(userBits, true);
}
// We (the compactor) are writing the value out to disk.
if (value == Token.INVALID) {
userBits = EntryBits.setInvalid(userBits, true);
wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
} else if (value == Token.LOCAL_INVALID) {
userBits = EntryBits.setLocalInvalid(userBits, true);
wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
} else if (value == Token.TOMBSTONE) {
userBits = EntryBits.setTombstone(userBits, true);
wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
} else if (value instanceof CachedDeserializable) {
CachedDeserializable proxy = (CachedDeserializable) value;
if (proxy instanceof StoredObject) {
@Released StoredObject ohproxy = (StoredObject) proxy;
try {
ohproxy.fillSerializedValue(wrapper, userBits);
} finally {
OffHeapHelper.releaseWithNoTracking(ohproxy);
}
} else {
userBits = EntryBits.setSerialized(userBits, true);
proxy.fillSerializedValue(wrapper, userBits);
}
} else if (value instanceof byte[]) {
byte[] valueBytes = (byte[]) value;
// If the value is already a byte array then the user bits stay 0 (the default value of the
// userBits variable), indicating non-serialized data. Thus the bytes are used as is and are
// not deserialized into an Object.
wrapper.setData(valueBytes, userBits, valueBytes.length, false);
} else if (Token.isRemoved(value) && value != Token.TOMBSTONE) {
// TODO - RVV - We need to handle tombstones differently here!
if (entry.getDiskId().isPendingAsync()) {
entry.getDiskId().setPendingAsync(false);
try {
getOplogSet().getChild().basicRemove(dr, entry, false, false);
} catch (IOException ex) {
getParent().getCancelCriterion().checkCancelInProgress(ex);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, dr.getName());
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
getParent().getCache().getCancelCriterion().checkCancelInProgress(ie);
throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, dr.getName());
}
} else {
rmLive(dr, entry);
}
foundData = false;
} else {
userBits = EntryBits.setSerialized(userBits, true);
EntryEventImpl.fillSerializedValue(wrapper, value, userBits);
}
}
if (foundData) {
// since the compactor is writing it out, clear the async flag
entry.getDiskId().setPendingAsync(false);
}
return foundData;
}
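To make the user-bit handling above easier to follow, here is a minimal sketch that composes and inspects bits with the same EntryBits calls used in this method; it assumes geode-core internals are on the classpath and that EntryBits lives in org.apache.geode.internal.cache.

import org.apache.geode.internal.cache.EntryBits;

public class UserBitsSketch {
  public static void main(String[] args) {
    byte userBits = 0;
    // Mark the value as serialized, as the compactor does for CachedDeserializable values.
    userBits = EntryBits.setSerialized(userBits, true);
    // Carry the versioning flag forward, mirroring the isWithVersions/setWithVersions handoff above.
    userBits = EntryBits.setWithVersions(userBits, true);

    System.out.println("withVersions = " + EntryBits.isWithVersions(userBits));
    System.out.println("anyInvalid   = " + EntryBits.isAnyInvalid(userBits));
    System.out.println("tombstone    = " + EntryBits.isTombstone(userBits));
  }
}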
Use of org.apache.geode.internal.offheap.StoredObject in project geode by apache.
The class EntryEventImpl, method exportNewValue.
/**
* Export the event's new value to the given importer.
*/
public void exportNewValue(NewValueImporter importer) {
final boolean prefersSerialized = importer.prefersNewSerialized();
if (prefersSerialized) {
if (getCachedSerializedNewValue() != null) {
importer.importNewBytes(getCachedSerializedNewValue(), true);
return;
} else if (this.newValueBytes != null && this.newValue instanceof CachedDeserializable) {
importer.importNewBytes(this.newValueBytes, true);
return;
}
}
@Unretained(ENTRY_EVENT_NEW_VALUE) final Object nv = getRawNewValue();
if (nv instanceof StoredObject) {
@Unretained(ENTRY_EVENT_NEW_VALUE) final StoredObject so = (StoredObject) nv;
final boolean isSerialized = so.isSerialized();
if (importer.isUnretainedNewReferenceOk()) {
importer.importNewObject(nv, isSerialized);
} else if (!isSerialized || prefersSerialized) {
byte[] bytes = so.getValueAsHeapByteArray();
importer.importNewBytes(bytes, isSerialized);
if (isSerialized) {
setCachedSerializedNewValue(bytes);
}
} else {
importer.importNewObject(so.getValueAsDeserializedHeapObject(), true);
}
} else if (nv instanceof byte[]) {
importer.importNewBytes((byte[]) nv, false);
} else if (nv instanceof CachedDeserializable) {
CachedDeserializable cd = (CachedDeserializable) nv;
Object cdV = cd.getValue();
if (cdV instanceof byte[]) {
importer.importNewBytes((byte[]) cdV, true);
setCachedSerializedNewValue((byte[]) cdV);
} else {
importer.importNewObject(cdV, true);
}
} else {
importer.importNewObject(nv, true);
}
}
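As a rough model of the importer side of this contract, the hypothetical class below (not the real NewValueImporter interface) records whichever form exportNewValue hands over; the method names simply mirror the calls made above.

// Hypothetical stand-in for the importer contract used by exportNewValue.
public class CapturingImporter {
  private byte[] bytes;
  private Object object;
  private boolean serialized;

  public boolean prefersNewSerialized() {
    // Ask the event for serialized bytes when it already has them cached.
    return true;
  }

  public boolean isUnretainedNewReferenceOk() {
    // Refuse unretained off-heap references so the event copies the value to the heap first.
    return false;
  }

  public void importNewBytes(byte[] nv, boolean isSerialized) {
    this.bytes = nv;
    this.serialized = isSerialized;
  }

  public void importNewObject(Object nv, boolean isSerialized) {
    this.object = nv;
    this.serialized = isSerialized;
  }
}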
Use of org.apache.geode.internal.offheap.StoredObject in project geode by apache.
The class EntryEventImpl, method setNewValueInRegion.
@Retained(ENTRY_EVENT_NEW_VALUE)
private void setNewValueInRegion(final LocalRegion owner, final RegionEntry reentry, Object oldValueForDelta) throws RegionClearedException {
boolean wasTombstone = reentry.isTombstone();
// If the event already contains a new value, the delta bytes should not be applied. This is possible if the event originated locally.
if (this.deltaBytes != null && this.newValue == null) {
processDeltaBytes(oldValueForDelta);
}
if (owner != null) {
owner.generateAndSetVersionTag(this, reentry);
} else {
this.region.generateAndSetVersionTag(this, reentry);
}
Object v = this.newValue;
if (v == null) {
v = isLocalInvalid() ? Token.LOCAL_INVALID : Token.INVALID;
} else {
this.region.regionInvalid = false;
}
reentry.setValueResultOfSearch(this.op.isNetSearch());
// in the primary.
if (v instanceof org.apache.geode.Delta && region.isUsedForPartitionedRegionBucket()) {
int vSize;
Object ov = basicGetOldValue();
if (ov instanceof CachedDeserializable && !GemFireCacheImpl.DELTAS_RECALCULATE_SIZE) {
vSize = ((CachedDeserializable) ov).getValueSizeInBytes();
} else {
vSize = CachedDeserializableFactory.calcMemSize(v, region.getObjectSizer(), false);
}
v = CachedDeserializableFactory.create(v, vSize);
basicSetNewValue(v);
}
Object preparedV = reentry.prepareValueForCache(this.region, v, this, false);
if (preparedV != v) {
v = preparedV;
if (v instanceof StoredObject) {
if (!((StoredObject) v).isCompressed()) {
// fix bug 52109
// If we put it off heap and it is not compressed then remember that value.
// Otherwise we want to remember the decompressed value in the event.
basicSetNewValue(v);
}
}
}
boolean isTombstone = (v == Token.TOMBSTONE);
boolean success = false;
boolean calledSetValue = false;
try {
setNewValueBucketSize(owner, v);
if ((this.op.isUpdate() && !reentry.isInvalid()) || this.op.isInvalidate()) {
IndexManager idxManager = IndexUtils.getIndexManager(this.region, false);
if (idxManager != null) {
try {
idxManager.updateIndexes(reentry, IndexManager.REMOVE_ENTRY, this.op.isUpdate() ? IndexProtocol.BEFORE_UPDATE_OP : IndexProtocol.OTHER_OP);
} catch (QueryException e) {
throw new IndexMaintenanceException(e);
}
}
}
calledSetValue = true;
// already called prepareValueForCache
reentry.setValueWithTombstoneCheck(v, this);
success = true;
} finally {
if (!success && reentry instanceof OffHeapRegionEntry && v instanceof StoredObject) {
OffHeapRegionEntryHelper.releaseEntry((OffHeapRegionEntry) reentry, (StoredObject) v);
}
}
if (logger.isTraceEnabled()) {
if (v instanceof CachedDeserializable) {
logger.trace("EntryEventImpl.setNewValueInRegion: put CachedDeserializable({},{})", this.getKey(), ((CachedDeserializable) v).getStringForm());
} else {
logger.trace("EntryEventImpl.setNewValueInRegion: put({},{})", this.getKey(), StringUtils.forceToString(v));
}
}
if (!isTombstone && wasTombstone) {
owner.unscheduleTombstone(reentry);
}
}
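The try/finally above follows a release-on-failure discipline for off-heap values; here is a self-contained sketch of that pattern with hypothetical names (the real code releases the entry via OffHeapRegionEntryHelper.releaseEntry).

// Hypothetical, self-contained sketch of the release-on-failure discipline.
public class ReleaseOnFailureSketch {
  interface OffHeapValue {
    void release();
  }

  static void storeValue(java.util.function.Consumer<Object> entrySetter, Object preparedValue) {
    boolean success = false;
    try {
      entrySetter.accept(preparedValue); // may throw, e.g. if index maintenance fails
      success = true;
    } finally {
      if (!success && preparedValue instanceof OffHeapValue) {
        // The entry never took ownership, so release the retained off-heap
        // reference to avoid leaking off-heap memory.
        ((OffHeapValue) preparedValue).release();
      }
    }
  }
}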
Use of org.apache.geode.internal.offheap.StoredObject in project geode by apache.
The class EntryEventImpl, method retainAndSetOldValue.
@Released(ENTRY_EVENT_OLD_VALUE)
private void retainAndSetOldValue(@Retained(ENTRY_EVENT_OLD_VALUE) Object v) {
if (v == this.oldValue)
return;
if (isOffHeapReference(v)) {
StoredObject so = (StoredObject) v;
if (ReferenceCountHelper.trackReferenceCounts()) {
ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
boolean couldNotRetain = (!so.retain());
ReferenceCountHelper.setReferenceCountOwner(null);
if (couldNotRetain) {
this.oldValue = null;
return;
}
} else {
if (!so.retain()) {
this.oldValue = null;
return;
}
}
}
basicSetOldValue(v);
}
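The retain call above can fail if the off-heap memory has already been freed; below is a minimal, self-contained sketch of that retain-or-drop pattern, using a hypothetical Retainable type in place of StoredObject.

// Hypothetical sketch of the retain-or-drop pattern used by retainAndSetOldValue.
public class RetainOrDropSketch {
  interface Retainable {
    boolean retain(); // returns false if the underlying memory was already freed
  }

  private Object oldValue;

  void retainAndSet(Object v) {
    if (v instanceof Retainable && !((Retainable) v).retain()) {
      // The off-heap memory was freed concurrently; drop the reference rather
      // than keep a pointer to released memory.
      this.oldValue = null;
      return;
    }
    this.oldValue = v;
  }
}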