
Example 21 with StoredObject

Use of org.apache.geode.internal.offheap.StoredObject in the Apache Geode project.

From the class DataSerializer, method writeObjectAsByteArray.

/**
   * Serializes the given object <code>obj</code> into a byte array using
   * {@link #writeObject(Object, DataOutput)} and then writes the byte array to the given data
   * output <code>out</code> in the same format that {@link #writeByteArray(byte[], DataOutput)} uses.
   * This method will serialize a <code>null</code> obj and not throw a
   * <code>NullPointerException</code>.
   *
   * @param obj the object to serialize and write
   * @param out the data output to write the byte array to
   * @throws IllegalArgumentException if a problem occurs while serializing <code>obj</code>
   * @throws IOException if a problem occurs while writing to <code>out</code>
   *
   * @see #readByteArray
   * @since GemFire 5.0.2
   */
public static void writeObjectAsByteArray(Object obj, DataOutput out) throws IOException {
    Object object = obj;
    if (obj instanceof CachedDeserializable) {
        if (obj instanceof StoredObject) {
            StoredObject so = (StoredObject) obj;
            if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
                logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray StoredObject");
            }
            so.sendAsByteArray(out);
            return;
        } else {
            object = ((CachedDeserializable) obj).getSerializedValue();
        }
    }
    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
        if (object == null) {
            logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray null");
        } else {
            logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray obj.getClass={}", object.getClass());
        }
    }
    if (object instanceof byte[] || object == null) {
        writeByteArray((byte[]) object, out);
    } else if (out instanceof ObjToByteArraySerializer) {
        ((ObjToByteArraySerializer) out).writeAsSerializedByteArray(object);
    } else /*
       * else if (obj instanceof Sendable) { ((Sendable)obj).sendTo(out); }
       */
    {
        HeapDataOutputStream hdos;
        if (object instanceof HeapDataOutputStream) {
            hdos = (HeapDataOutputStream) object;
        } else {
            Version v = InternalDataSerializer.getVersionForDataStreamOrNull(out);
            if (v == null) {
                v = Version.CURRENT;
            }
            hdos = new HeapDataOutputStream(v);
            try {
                DataSerializer.writeObject(object, hdos);
            } catch (IOException e) {
                RuntimeException e2 = new IllegalArgumentException(LocalizedStrings.DataSerializer_PROBELM_WHILE_SERIALIZING.toLocalizedString());
                e2.initCause(e);
                throw e2;
            }
        }
        InternalDataSerializer.writeArrayLength(hdos.size(), out);
        hdos.sendTo(out);
    }
}
Also used : ObjToByteArraySerializer(org.apache.geode.internal.ObjToByteArraySerializer) CachedDeserializable(org.apache.geode.internal.cache.CachedDeserializable) StoredObject(org.apache.geode.internal.offheap.StoredObject) Version(org.apache.geode.internal.Version) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) IOException(java.io.IOException)
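
A minimal round-trip sketch of this API, assuming the Geode jars are on the classpath; the stream setup and class name below are illustrative, and only writeObjectAsByteArray, readByteArray, and readObject come from the DataSerializer API shown above.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.geode.DataSerializer;

public class WriteObjectAsByteArrayExample {
    public static void main(String[] args) throws IOException, ClassNotFoundException {
        // Serialize "hello" and write it as a length-prefixed byte array.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(baos);
        DataSerializer.writeObjectAsByteArray("hello", out);
        out.flush();
        // Read the byte array back, then deserialize its contents.
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
        byte[] bytes = DataSerializer.readByteArray(in);
        String value = DataSerializer.readObject(new DataInputStream(new ByteArrayInputStream(bytes)));
        System.out.println(value); // prints "hello"
    }
}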

Example 22 with StoredObject

Use of org.apache.geode.internal.offheap.StoredObject in the Apache Geode project.

From the class Oplog, method getBytesAndBitsForCompaction.

/**
   * Retrieves the value for an entry being compacted, subject to the entry referencing the oplog
   * being compacted. An attempt is made to retrieve the value from memory, if available, else
   * from the async buffers (if async mode is enabled), else from the Oplog being compacted. It is
   * invoked from switchOplog as well as from OplogCompactor's compact function.
   * 
   * @param entry the DiskEntry being compacted, which references the Oplog being compacted
   * @param wrapper the BytesAndBitsForCompactor into which the data, if found, is set; the
   *        wrapper also carries the user bits associated with the entry
   * @return false if the entry need not be compacted; true if the wrapper has been filled with
   *         the entry's data
   */
private boolean getBytesAndBitsForCompaction(DiskRegionView dr, DiskEntry entry, BytesAndBitsForCompactor wrapper) {
    // caller is synced on did
    DiskId did = entry.getDiskId();
    byte userBits = 0;
    long oplogOffset = did.getOffsetInOplog();
    ReferenceCountHelper.skipRefCountTracking();
    @Retained @Released Object value = entry._getValueRetain(dr, true);
    ReferenceCountHelper.unskipRefCountTracking();
    boolean foundData = false;
    if (value == null) {
        // If the mode is synch, the value is guaranteed to be present on disk
        foundData = basicGetForCompactor(dr, oplogOffset, false, did.getValueLength(), did.getUserBits(), wrapper);
        // since the caller is synced on the diskId, this oplogId should not be able to change, but check anyway
        if (did.getOplogId() != getOplogId()) {
            // if it is not then no need to compact it
            return false;
        } else {
            // then we should have found data
            assert foundData : "compactor get failed on oplog#" + getOplogId();
        }
        userBits = wrapper.getBits();
        if (EntryBits.isAnyInvalid(userBits)) {
            if (EntryBits.isInvalid(userBits)) {
                wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
            } else {
                wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
            }
        } else if (EntryBits.isTombstone(userBits)) {
            wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
        }
        if (EntryBits.isWithVersions(did.getUserBits())) {
            userBits = EntryBits.setWithVersions(userBits, true);
        }
    } else {
        foundData = true;
        userBits = 0;
        if (EntryBits.isRecoveredFromDisk(did.getUserBits())) {
            userBits = EntryBits.setRecoveredFromDisk(userBits, true);
        }
        if (EntryBits.isWithVersions(did.getUserBits())) {
            userBits = EntryBits.setWithVersions(userBits, true);
        }
        // (the compactor) are writing the value out to disk.
        if (value == Token.INVALID) {
            userBits = EntryBits.setInvalid(userBits, true);
            wrapper.setData(DiskEntry.INVALID_BYTES, userBits, DiskEntry.INVALID_BYTES.length, false);
        } else if (value == Token.LOCAL_INVALID) {
            userBits = EntryBits.setLocalInvalid(userBits, true);
            wrapper.setData(DiskEntry.LOCAL_INVALID_BYTES, userBits, DiskEntry.LOCAL_INVALID_BYTES.length, false);
        } else if (value == Token.TOMBSTONE) {
            userBits = EntryBits.setTombstone(userBits, true);
            wrapper.setData(DiskEntry.TOMBSTONE_BYTES, userBits, DiskEntry.TOMBSTONE_BYTES.length, false);
        } else if (value instanceof CachedDeserializable) {
            CachedDeserializable proxy = (CachedDeserializable) value;
            if (proxy instanceof StoredObject) {
                @Released StoredObject ohproxy = (StoredObject) proxy;
                try {
                    ohproxy.fillSerializedValue(wrapper, userBits);
                } finally {
                    OffHeapHelper.releaseWithNoTracking(ohproxy);
                }
            } else {
                userBits = EntryBits.setSerialized(userBits, true);
                proxy.fillSerializedValue(wrapper, userBits);
            }
        } else if (value instanceof byte[]) {
            byte[] valueBytes = (byte[]) value;
            // If the value is already a byte array then the user bit
            // is 0 (the default value of the userBits variable),
            // indicating non-serialized data. The bytes are therefore
            // used as-is and are not deserialized into an Object.
            wrapper.setData(valueBytes, userBits, valueBytes.length, false);
        } else if (Token.isRemoved(value) && value != Token.TOMBSTONE) {
            // TODO - RVV - We need to handle tombstones differently here!
            if (entry.getDiskId().isPendingAsync()) {
                entry.getDiskId().setPendingAsync(false);
                try {
                    getOplogSet().getChild().basicRemove(dr, entry, false, false);
                } catch (IOException ex) {
                    getParent().getCancelCriterion().checkCancelInProgress(ex);
                    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0.toLocalizedString(this.diskFile.getPath()), ex, dr.getName());
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    getParent().getCache().getCancelCriterion().checkCancelInProgress(ie);
                    throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_WRITING_KEY_TO_0_DUE_TO_FAILURE_IN_ACQUIRING_READ_LOCK_FOR_ASYNCH_WRITING.toLocalizedString(this.diskFile.getPath()), ie, dr.getName());
                }
            } else {
                rmLive(dr, entry);
            }
            foundData = false;
        } else {
            userBits = EntryBits.setSerialized(userBits, true);
            EntryEventImpl.fillSerializedValue(wrapper, value, userBits);
        }
    }
    if (foundData) {
        // since the compactor is writing it out clear the async flag
        entry.getDiskId().setPendingAsync(false);
    }
    return foundData;
}
Also used : Released(org.apache.geode.internal.offheap.annotations.Released) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Retained(org.apache.geode.internal.offheap.annotations.Retained) StoredObject(org.apache.geode.internal.offheap.StoredObject) DiskAccessException(org.apache.geode.cache.DiskAccessException)
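
The userBits byte above packs several independent flags (serialized, invalid, tombstone, with-versions, recovered-from-disk) that Geode manipulates through the EntryBits helpers. The following self-contained sketch only illustrates that bit-flag idiom; the constants, bit positions, and class name are hypothetical stand-ins, not Geode's actual EntryBits layout.

public final class UserBitsSketch {
    // Arbitrary bit positions chosen for illustration only.
    private static final byte SERIALIZED = 0x1;
    private static final byte INVALID = 0x2;
    private static final byte TOMBSTONE = 0x4;
    private static final byte WITH_VERSIONS = 0x8;

    static byte setSerialized(byte bits, boolean on) {
        return on ? (byte) (bits | SERIALIZED) : (byte) (bits & ~SERIALIZED);
    }

    static boolean isSerialized(byte bits) {
        return (bits & SERIALIZED) != 0;
    }

    static byte setWithVersions(byte bits, boolean on) {
        return on ? (byte) (bits | WITH_VERSIONS) : (byte) (bits & ~WITH_VERSIONS);
    }

    public static void main(String[] args) {
        byte userBits = 0;                          // default: plain, non-serialized byte[] data
        userBits = setSerialized(userBits, true);   // value will be written in serialized form
        userBits = setWithVersions(userBits, true); // entry carries version information
        System.out.println(isSerialized(userBits)); // true
    }
}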

Example 23 with StoredObject

Use of org.apache.geode.internal.offheap.StoredObject in the Apache Geode project.

From the class EntryEventImpl, method exportNewValue.

/**
   * Export the event's new value to the given importer.
   */
public void exportNewValue(NewValueImporter importer) {
    final boolean prefersSerialized = importer.prefersNewSerialized();
    if (prefersSerialized) {
        if (getCachedSerializedNewValue() != null) {
            importer.importNewBytes(getCachedSerializedNewValue(), true);
            return;
        } else if (this.newValueBytes != null && this.newValue instanceof CachedDeserializable) {
            importer.importNewBytes(this.newValueBytes, true);
            return;
        }
    }
    @Unretained(ENTRY_EVENT_NEW_VALUE) final Object nv = getRawNewValue();
    if (nv instanceof StoredObject) {
        @Unretained(ENTRY_EVENT_NEW_VALUE) final StoredObject so = (StoredObject) nv;
        final boolean isSerialized = so.isSerialized();
        if (importer.isUnretainedNewReferenceOk()) {
            importer.importNewObject(nv, isSerialized);
        } else if (!isSerialized || prefersSerialized) {
            byte[] bytes = so.getValueAsHeapByteArray();
            importer.importNewBytes(bytes, isSerialized);
            if (isSerialized) {
                setCachedSerializedNewValue(bytes);
            }
        } else {
            importer.importNewObject(so.getValueAsDeserializedHeapObject(), true);
        }
    } else if (nv instanceof byte[]) {
        importer.importNewBytes((byte[]) nv, false);
    } else if (nv instanceof CachedDeserializable) {
        CachedDeserializable cd = (CachedDeserializable) nv;
        Object cdV = cd.getValue();
        if (cdV instanceof byte[]) {
            importer.importNewBytes((byte[]) cdV, true);
            setCachedSerializedNewValue((byte[]) cdV);
        } else {
            importer.importNewObject(cdV, true);
        }
    } else {
        importer.importNewObject(nv, true);
    }
}
Also used : StoredObject(org.apache.geode.internal.offheap.StoredObject) Unretained(org.apache.geode.internal.offheap.annotations.Unretained)
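
The importer passed to exportNewValue is a small callback interface. Assuming NewValueImporter declares only the four callbacks exercised above, a minimal importer that asks for serialized bytes and simply captures whatever representation it is handed might look like this sketch (class and field names are illustrative):

import org.apache.geode.internal.cache.EntryEventImpl.NewValueImporter;

public class CapturingNewValueImporter implements NewValueImporter {
    private byte[] bytes;
    private Object object;
    private boolean serialized;

    @Override
    public boolean prefersNewSerialized() {
        return true; // ask exportNewValue for the serialized form when one is available
    }

    @Override
    public boolean isUnretainedNewReferenceOk() {
        return false; // never keep an unretained off-heap reference
    }

    @Override
    public void importNewObject(Object nv, boolean isSerialized) {
        this.object = nv;
        this.serialized = isSerialized;
    }

    @Override
    public void importNewBytes(byte[] nv, boolean isSerialized) {
        this.bytes = nv;
        this.serialized = isSerialized;
    }
}

Calling event.exportNewValue(new CapturingNewValueImporter()) then routes the new value through exactly one of the two import callbacks, following the branches shown in the method above.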

Example 24 with StoredObject

Use of org.apache.geode.internal.offheap.StoredObject in the Apache Geode project.

From the class EntryEventImpl, method setNewValueInRegion.

@Retained(ENTRY_EVENT_NEW_VALUE)
private void setNewValueInRegion(final LocalRegion owner, final RegionEntry reentry, Object oldValueForDelta) throws RegionClearedException {
    boolean wasTombstone = reentry.isTombstone();
    // If the event already carries a new value, the delta bytes may
    // not be applied. This is possible if the event originated locally.
    if (this.deltaBytes != null && this.newValue == null) {
        processDeltaBytes(oldValueForDelta);
    }
    if (owner != null) {
        owner.generateAndSetVersionTag(this, reentry);
    } else {
        this.region.generateAndSetVersionTag(this, reentry);
    }
    Object v = this.newValue;
    if (v == null) {
        v = isLocalInvalid() ? Token.LOCAL_INVALID : Token.INVALID;
    } else {
        this.region.regionInvalid = false;
    }
    reentry.setValueResultOfSearch(this.op.isNetSearch());
    // in the primary.
    if (v instanceof org.apache.geode.Delta && region.isUsedForPartitionedRegionBucket()) {
        int vSize;
        Object ov = basicGetOldValue();
        if (ov instanceof CachedDeserializable && !GemFireCacheImpl.DELTAS_RECALCULATE_SIZE) {
            vSize = ((CachedDeserializable) ov).getValueSizeInBytes();
        } else {
            vSize = CachedDeserializableFactory.calcMemSize(v, region.getObjectSizer(), false);
        }
        v = CachedDeserializableFactory.create(v, vSize);
        basicSetNewValue(v);
    }
    Object preparedV = reentry.prepareValueForCache(this.region, v, this, false);
    if (preparedV != v) {
        v = preparedV;
        if (v instanceof StoredObject) {
            if (!((StoredObject) v).isCompressed()) {
                // fix bug 52109
                // If we put it off heap and it is not compressed then remember that value.
                // Otherwise we want to remember the decompressed value in the event.
                basicSetNewValue(v);
            }
        }
    }
    boolean isTombstone = (v == Token.TOMBSTONE);
    boolean success = false;
    boolean calledSetValue = false;
    try {
        setNewValueBucketSize(owner, v);
        if ((this.op.isUpdate() && !reentry.isInvalid()) || this.op.isInvalidate()) {
            IndexManager idxManager = IndexUtils.getIndexManager(this.region, false);
            if (idxManager != null) {
                try {
                    idxManager.updateIndexes(reentry, IndexManager.REMOVE_ENTRY, this.op.isUpdate() ? IndexProtocol.BEFORE_UPDATE_OP : IndexProtocol.OTHER_OP);
                } catch (QueryException e) {
                    throw new IndexMaintenanceException(e);
                }
            }
        }
        calledSetValue = true;
        // already called prepareValueForCache
        reentry.setValueWithTombstoneCheck(v, this);
        success = true;
    } finally {
        if (!success && reentry instanceof OffHeapRegionEntry && v instanceof StoredObject) {
            OffHeapRegionEntryHelper.releaseEntry((OffHeapRegionEntry) reentry, (StoredObject) v);
        }
    }
    if (logger.isTraceEnabled()) {
        if (v instanceof CachedDeserializable) {
            logger.trace("EntryEventImpl.setNewValueInRegion: put CachedDeserializable({},{})", this.getKey(), ((CachedDeserializable) v).getStringForm());
        } else {
            logger.trace("EntryEventImpl.setNewValueInRegion: put({},{})", this.getKey(), StringUtils.forceToString(v));
        }
    }
    if (!isTombstone && wasTombstone) {
        owner.unscheduleTombstone(reentry);
    }
}
Also used : IndexManager(org.apache.geode.cache.query.internal.index.IndexManager) QueryException(org.apache.geode.cache.query.QueryException) StoredObject(org.apache.geode.internal.offheap.StoredObject) IndexMaintenanceException(org.apache.geode.cache.query.IndexMaintenanceException) Retained(org.apache.geode.internal.offheap.annotations.Retained)
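
The try/finally near the end of the method guards against leaking off-heap memory when the cache update fails after the value has already been prepared. The sketch below shows that release-on-failure idiom in isolation; the Update hook is a hypothetical stand-in for reentry.setValueWithTombstoneCheck, and OffHeapHelper.releaseWithNoTracking (seen in Example 22) is used here in place of OffHeapRegionEntryHelper.releaseEntry purely for illustration.

import org.apache.geode.internal.offheap.OffHeapHelper;
import org.apache.geode.internal.offheap.StoredObject;

public class ReleaseOnFailureSketch {
    // Hypothetical update hook standing in for the real region-entry update.
    interface Update {
        void apply(Object v) throws Exception;
    }

    // Apply the update; if it throws, release the prepared off-heap value so it is not leaked.
    static void applyOrRelease(Object preparedValue, Update update) throws Exception {
        boolean success = false;
        try {
            update.apply(preparedValue);
            success = true;
        } finally {
            if (!success && preparedValue instanceof StoredObject) {
                OffHeapHelper.releaseWithNoTracking((StoredObject) preparedValue);
            }
        }
    }
}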

Example 25 with StoredObject

Use of org.apache.geode.internal.offheap.StoredObject in the Apache Geode project.

From the class EntryEventImpl, method retainAndSetOldValue.

@Released(ENTRY_EVENT_OLD_VALUE)
private void retainAndSetOldValue(@Retained(ENTRY_EVENT_OLD_VALUE) Object v) {
    if (v == this.oldValue)
        return;
    if (isOffHeapReference(v)) {
        StoredObject so = (StoredObject) v;
        if (ReferenceCountHelper.trackReferenceCounts()) {
            ReferenceCountHelper.setReferenceCountOwner(new OldValueOwner());
            boolean couldNotRetain = (!so.retain());
            ReferenceCountHelper.setReferenceCountOwner(null);
            if (couldNotRetain) {
                this.oldValue = null;
                return;
            }
        } else {
            if (!so.retain()) {
                this.oldValue = null;
                return;
            }
        }
    }
    basicSetOldValue(v);
}
Also used : StoredObject(org.apache.geode.internal.offheap.StoredObject) Released(org.apache.geode.internal.offheap.annotations.Released)
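
The retain() call above returns false when the off-heap memory backing the value has already been freed by a concurrent release, in which case the event falls back to a null old value. A minimal caller-side sketch of that pattern (the helper name is illustrative):

import org.apache.geode.internal.offheap.StoredObject;

public final class RetainSketch {
    // Returns the value with its reference count bumped, or null if the
    // underlying off-heap memory has already been freed.
    static Object retainOrNull(Object v) {
        if (v instanceof StoredObject) {
            StoredObject so = (StoredObject) v;
            if (!so.retain()) {
                return null; // concurrently released; treat the value as missing
            }
        }
        return v;
    }
}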

Aggregations

StoredObject (org.apache.geode.internal.offheap.StoredObject): 34
Test (org.junit.Test): 11
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 10
Retained (org.apache.geode.internal.offheap.annotations.Retained): 7
Unretained (org.apache.geode.internal.offheap.annotations.Unretained): 7
Released (org.apache.geode.internal.offheap.annotations.Released): 6
CachedDeserializable (org.apache.geode.internal.cache.CachedDeserializable): 5
NewValueImporter (org.apache.geode.internal.cache.EntryEventImpl.NewValueImporter): 4
OldValueImporter (org.apache.geode.internal.cache.EntryEventImpl.OldValueImporter): 4
IOException (java.io.IOException): 3
PdxInstance (org.apache.geode.pdx.PdxInstance): 2
InterruptedIOException (java.io.InterruptedIOException): 1
ByteBuffer (java.nio.ByteBuffer): 1
ArrayList (java.util.ArrayList): 1
CountDownLatch (java.util.concurrent.CountDownLatch): 1
CacheException (org.apache.geode.cache.CacheException): 1
CacheWriterException (org.apache.geode.cache.CacheWriterException): 1
DiskAccessException (org.apache.geode.cache.DiskAccessException): 1
EntryEvent (org.apache.geode.cache.EntryEvent): 1
Region (org.apache.geode.cache.Region): 1