Example 91 with HeapDataOutputStream

use of org.apache.geode.internal.HeapDataOutputStream in project geode by apache.

the class DataSerializer method writeObjectAsByteArray.

/**
   * Serializes the given object <code>obj</code> into a byte array using
   * {@link #writeObject(Object, DataOutput)} and then writes the byte array to the given data
   * output <code>out</code> in the same format {@link #writeByteArray(byte[], DataOutput)} does.
   * This method will serialize a <code>null</code> obj and not throw a
   * <code>NullPointerException</code>.
   *
   * @param obj the object to serialize and write
   * @param out the data output to write the byte array to
   * @throws IllegalArgumentException if a problem occurs while serializing <code>obj</code>
   * @throws IOException if a problem occurs while writing to <code>out</code>
   *
   * @see #readByteArray
   * @since GemFire 5.0.2
   */
public static void writeObjectAsByteArray(Object obj, DataOutput out) throws IOException {
    Object object = obj;
    if (obj instanceof CachedDeserializable) {
        if (obj instanceof StoredObject) {
            StoredObject so = (StoredObject) obj;
            if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
                logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray StoredObject");
            }
            so.sendAsByteArray(out);
            return;
        } else {
            object = ((CachedDeserializable) obj).getSerializedValue();
        }
    }
    if (logger.isTraceEnabled(LogMarker.SERIALIZER)) {
        if (object == null) {
            logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray null");
        } else {
            logger.trace(LogMarker.SERIALIZER, "writeObjectAsByteArray obj.getClass={}", object.getClass());
        }
    }
    if (object instanceof byte[] || object == null) {
        writeByteArray((byte[]) object, out);
    } else if (out instanceof ObjToByteArraySerializer) {
        ((ObjToByteArraySerializer) out).writeAsSerializedByteArray(object);
    } else /*
       * else if (obj instanceof Sendable) { ((Sendable)obj).sendTo(out); }
       */
    {
        HeapDataOutputStream hdos;
        if (object instanceof HeapDataOutputStream) {
            hdos = (HeapDataOutputStream) object;
        } else {
            Version v = InternalDataSerializer.getVersionForDataStreamOrNull(out);
            if (v == null) {
                v = Version.CURRENT;
            }
            hdos = new HeapDataOutputStream(v);
            try {
                DataSerializer.writeObject(object, hdos);
            } catch (IOException e) {
                RuntimeException e2 = new IllegalArgumentException(LocalizedStrings.DataSerializer_PROBELM_WHILE_SERIALIZING.toLocalizedString());
                e2.initCause(e);
                throw e2;
            }
        }
        InternalDataSerializer.writeArrayLength(hdos.size(), out);
        hdos.sendTo(out);
    }
}
Also used : ObjToByteArraySerializer(org.apache.geode.internal.ObjToByteArraySerializer) CachedDeserializable(org.apache.geode.internal.cache.CachedDeserializable) StoredObject(org.apache.geode.internal.offheap.StoredObject) Version(org.apache.geode.internal.Version) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) IOException(java.io.IOException)
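
A quick round-trip sketch of how this pairs with readByteArray, as the javadoc above says (not from the Geode sources; the String payload and method name are illustrative, and java.io plus the Geode classes listed above are assumed imported):

static Object roundTrip() throws IOException, ClassNotFoundException {
    HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
    DataSerializer.writeObjectAsByteArray("hello", hdos);
    DataInput in = new DataInputStream(new ByteArrayInputStream(hdos.toByteArray()));
    // first pass recovers the length-prefixed blob written above
    byte[] blob = DataSerializer.readByteArray(in);
    // second pass decodes the object held inside the blob
    return DataSerializer.readObject(new DataInputStream(new ByteArrayInputStream(blob)));
}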

Example 92 with HeapDataOutputStream

use of org.apache.geode.internal.HeapDataOutputStream in project geode by apache.

the class CumulativeNonDistinctResults method toData.

// TODO: optimize for struct elements by directly writing the fields instead of the struct
@Override
public void toData(DataOutput out) throws IOException {
    boolean isStruct = this.collectionType.getElementType().isStructType();
    DataSerializer.writeObject(this.collectionType.getElementType(), out);
    HeapDataOutputStream hdos = new HeapDataOutputStream(1024, null);
    LongUpdater lu = hdos.reserveLong();
    Iterator<E> iter = this.iterator();
    int numElements = 0;
    while (iter.hasNext()) {
        E data = iter.next();
        if (isStruct) {
            Object[] fields = ((Struct) data).getFieldValues();
            // must target hdos, not out: the element count reserved above has to precede the elements
            DataSerializer.writeObjectArray(fields, hdos);
        } else {
            DataSerializer.writeObject(data, hdos);
        }
        ++numElements;
    }
    lu.update(numElements);
    hdos.sendTo(out);
}
Also used : LongUpdater(org.apache.geode.internal.HeapDataOutputStream.LongUpdater) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) Struct(org.apache.geode.cache.query.Struct)
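
The reserveLong/LongUpdater pair is the interesting part here: it leaves an 8-byte placeholder in the stream that is back-patched once the element count is known, so the collection only has to be walked once. A minimal sketch of the pattern (the payload values are illustrative, and java.util.Arrays is assumed imported):

static void writeCounted(DataOutput out) throws IOException {
    HeapDataOutputStream hdos = new HeapDataOutputStream(1024, null);
    LongUpdater count = hdos.reserveLong(); // placeholder written now, patched later
    int n = 0;
    for (String s : Arrays.asList("a", "b", "c")) {
        DataSerializer.writeObject(s, hdos);
        n++;
    }
    count.update(n); // back-patch the reserved slot with the final count
    hdos.sendTo(out); // the count reaches the wire ahead of the elements
}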

Example 93 with HeapDataOutputStream

use of org.apache.geode.internal.HeapDataOutputStream in project geode by apache.

the class Oplog method readCrf.

/**
   * Returns the number of bytes read.
   */
private long readCrf(OplogEntryIdSet deletedIds, boolean recoverValues, boolean latestOplog) {
    this.recoverNewEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryId = DiskStoreImpl.INVALID_ID;
    this.recoverModEntryIdHWM = DiskStoreImpl.INVALID_ID;
    boolean readLastRecord = true;
    CountingDataInputStream dis = null;
    try {
        final LocalRegion currentRegion = LocalRegion.getInitializingRegion();
        final Version version = getProductVersionIfOld();
        final ByteArrayDataInput in = new ByteArrayDataInput();
        final HeapDataOutputStream hdos = new HeapDataOutputStream(Version.CURRENT);
        int recordCount = 0;
        boolean foundDiskStoreRecord = false;
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(this.crf.f);
            dis = new CountingDataInputStream(new BufferedInputStream(fis, 1024 * 1024), this.crf.f.length());
            boolean endOfLog = false;
            while (!endOfLog) {
                // long startPosition = byteCount;
                if (dis.atEndOfFile()) {
                    endOfLog = true;
                    break;
                }
                readLastRecord = false;
                byte opCode = dis.readByte();
                if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                    logger.trace(LogMarker.PERSIST_RECOVERY, "Oplog opCode={}", opCode);
                }
                switch(opCode) {
                    case OPLOG_EOF_ID:
                        // we are at the end of the oplog. So we need to back up one byte
                        dis.decrementCount();
                        endOfLog = true;
                        break;
                    case OPLOG_CONFLICT_VERSION:
                        this.readVersionTagOnlyEntry(dis, opCode);
                        break;
                    case OPLOG_NEW_ENTRY_BASE_ID:
                        {
                            long newEntryBase = dis.readLong();
                            if (logger.isTraceEnabled(LogMarker.PERSIST_RECOVERY)) {
                                logger.trace(LogMarker.PERSIST_RECOVERY, "newEntryBase={}", newEntryBase);
                            }
                            readEndOfRecord(dis);
                            setRecoverNewEntryId(newEntryBase);
                            recordCount++;
                        }
                        break;
                    case OPLOG_NEW_ENTRY_0ID:
                        readNewEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_1ID:
                    case OPLOG_MOD_ENTRY_2ID:
                    case OPLOG_MOD_ENTRY_3ID:
                    case OPLOG_MOD_ENTRY_4ID:
                    case OPLOG_MOD_ENTRY_5ID:
                    case OPLOG_MOD_ENTRY_6ID:
                    case OPLOG_MOD_ENTRY_7ID:
                    case OPLOG_MOD_ENTRY_8ID:
                        readModifyEntry(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_MOD_ENTRY_WITH_KEY_1ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_2ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_3ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_4ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_5ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_6ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_7ID:
                    case OPLOG_MOD_ENTRY_WITH_KEY_8ID:
                        readModifyEntryWithKey(dis, opCode, deletedIds, recoverValues, currentRegion, version, in, hdos);
                        recordCount++;
                        break;
                    case OPLOG_DISK_STORE_ID:
                        readDiskStoreRecord(dis, this.crf.f);
                        foundDiskStoreRecord = true;
                        recordCount++;
                        break;
                    case OPLOG_MAGIC_SEQ_ID:
                        readOplogMagicSeqRecord(dis, this.crf.f, OPLOG_TYPE.CRF);
                        break;
                    case OPLOG_GEMFIRE_VERSION:
                        readGemfireVersionRecord(dis, this.crf.f);
                        recordCount++;
                        break;
                    case OPLOG_RVV:
                        readRVVRecord(dis, this.drf.f, false, latestOplog);
                        recordCount++;
                        break;
                    default:
                        throw new DiskAccessException(LocalizedStrings.Oplog_UNKNOWN_OPCODE_0_FOUND_IN_DISK_OPERATION_LOG.toLocalizedString(opCode), getParent());
                }
                readLastRecord = true;
            // @todo
            // if (rgn.isDestroyed()) {
            // break;
            // }
            }
        // while
        } finally {
            if (dis != null) {
                dis.close();
            }
            if (fis != null) {
                fis.close();
            }
        }
        if (!foundDiskStoreRecord && recordCount > 0) {
            throw new DiskAccessException("The oplog file \"" + this.crf.f + "\" does not belong to the init file \"" + getParent().getInitFile() + "\". Crf did not contain a disk store id.", getParent());
        }
    } catch (EOFException ignore) {
    // ignore since a partial record write can be caused by a crash
    } catch (IOException ex) {
        getParent().getCancelCriterion().checkCancelInProgress(ex);
        throw new DiskAccessException(LocalizedStrings.Oplog_FAILED_READING_FILE_DURING_RECOVERY_FROM_0.toLocalizedString(this.crf.f.getPath()), ex, getParent());
    } catch (CancelException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Cache was closed", e);
        }
    } catch (RegionDestroyedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Oplog::readOplog:Error in recovery as Region was destroyed", e);
        }
    } catch (IllegalStateException e) {
        throw e;
    }
    // Add the Oplog size to the Directory Holder which owns this oplog,
    // so that available space is correctly calculated & stats updated.
    long byteCount = 0;
    if (!readLastRecord) {
        // this means that there was a crash
        // and hence we should not continue to read
        // the next oplog
        this.crashed = true;
        if (dis != null) {
            byteCount = dis.getFileLength();
        }
    } else {
        if (dis != null) {
            byteCount = dis.getCount();
        }
    }
    return byteCount;
}
Also used : RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) ByteArrayDataInput(org.apache.geode.internal.ByteArrayDataInput) FileInputStream(java.io.FileInputStream) Version(org.apache.geode.internal.Version) BufferedInputStream(java.io.BufferedInputStream) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) DiskAccessException(org.apache.geode.cache.DiskAccessException) EOFException(java.io.EOFException) CancelException(org.apache.geode.CancelException)
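
Structurally, this is an opcode-dispatch recovery scan: read one opcode byte, decode that record, and only mark readLastRecord once the record has parsed completely, so a torn final record left by a crash is detected rather than trusted. A stripped-down sketch of that shape, using hypothetical opcodes and plain java.io streams in place of Geode's CountingDataInputStream:

static boolean scan(java.io.File crf) throws java.io.IOException {
    final byte OP_EOF = 0;
    final byte OP_PUT = 1; // hypothetical opcodes, not the real oplog values
    boolean readLastRecord = true;
    try (java.io.DataInputStream dis =
        new java.io.DataInputStream(new java.io.FileInputStream(crf))) {
        boolean endOfLog = false;
        while (!endOfLog) {
            readLastRecord = false; // pessimistic until this record parses fully
            byte opCode = dis.readByte();
            switch (opCode) {
                case OP_EOF:
                    endOfLog = true;
                    break;
                case OP_PUT:
                    int len = dis.readInt();
                    if (dis.skipBytes(len) != len) { // a real reader would decode the payload
                        throw new java.io.EOFException();
                    }
                    break;
                default:
                    throw new java.io.IOException("unknown opcode " + opCode);
            }
            readLastRecord = true; // record consumed without error
        }
    } catch (java.io.EOFException ignore) {
        // a partial trailing record is expected after a crash; readLastRecord stays false
    }
    return readLastRecord;
}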

Example 94 with HeapDataOutputStream

use of org.apache.geode.internal.HeapDataOutputStream in project geode by apache.

the class BlobHelper method serializeToBlob.

/**
   * A blob is a serialized Object. This method serializes the object into a blob and returns the
   * byte array that contains the blob.
   */
public static byte[] serializeToBlob(Object obj, Version version) throws IOException {
    final long start = startSerialization();
    HeapDataOutputStream hdos = new HeapDataOutputStream(version);
    DataSerializer.writeObject(obj, hdos);
    byte[] result = hdos.toByteArray();
    endSerialization(start, result.length);
    return result;
}
Also used : HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream)
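
And the matching read side, as a minimal sketch (the payload is illustrative, and java.io classes are assumed imported; a plain DataSerializer.readObject pass over the bytes shows the symmetry):

static Object blobRoundTrip() throws IOException, ClassNotFoundException {
    byte[] blob = BlobHelper.serializeToBlob("payload", Version.CURRENT);
    // decode the blob by reading the serialized object back out of the raw bytes
    return DataSerializer.readObject(
        new DataInputStream(new ByteArrayInputStream(blob)));
}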

Example 95 with HeapDataOutputStream

use of org.apache.geode.internal.HeapDataOutputStream in project geode by apache.

the class ClientCacheFactoryJUnitTest method testOldClientIDDeserialization.

@Test
public void testOldClientIDDeserialization() throws Exception {
    // during a HandShake a clientID is read without knowing the client's version
    cc = new ClientCacheFactory().create();
    GemFireCacheImpl gfc = (GemFireCacheImpl) cc;
    InternalDistributedMember memberID = (InternalDistributedMember) cc.getDistributedSystem().getDistributedMember();
    GMSMember gmsID = (GMSMember) memberID.getNetMember();
    memberID.setVersionObjectForTest(Version.GFE_82);
    assertEquals(Version.GFE_82, memberID.getVersionObject());
    ClientProxyMembershipID clientID = ClientProxyMembershipID.getClientId(memberID);
    HeapDataOutputStream out = new HeapDataOutputStream(Version.GFE_82);
    DataSerializer.writeObject(clientID, out);
    DataInputStream in = new VersionedDataInputStream(new ByteArrayInputStream(out.toByteArray()), Version.CURRENT);
    ClientProxyMembershipID newID = DataSerializer.readObject(in);
    InternalDistributedMember newMemberID = (InternalDistributedMember) newID.getDistributedMember();
    assertEquals(Version.GFE_82, newMemberID.getVersionObject());
    assertEquals(Version.GFE_82, newID.getClientVersion());
    GMSMember newGmsID = (GMSMember) newMemberID.getNetMember();
    assertEquals(0, newGmsID.getUuidLSBs());
    assertEquals(0, newGmsID.getUuidMSBs());
    gmsID.setUUID(new UUID(1234L, 5678L));
    memberID.setVersionObjectForTest(Version.CURRENT);
    clientID = ClientProxyMembershipID.getClientId(memberID);
    out = new HeapDataOutputStream(Version.CURRENT);
    DataSerializer.writeObject(clientID, out);
    in = new VersionedDataInputStream(new ByteArrayInputStream(out.toByteArray()), Version.CURRENT);
    newID = DataSerializer.readObject(in);
    newMemberID = (InternalDistributedMember) newID.getDistributedMember();
    assertEquals(Version.CURRENT, newMemberID.getVersionObject());
    assertEquals(Version.CURRENT, newID.getClientVersion());
    newGmsID = (GMSMember) newMemberID.getNetMember();
    assertEquals(gmsID.getUuidLSBs(), newGmsID.getUuidLSBs());
    assertEquals(gmsID.getUuidMSBs(), newGmsID.getUuidMSBs());
}
Also used : ClientProxyMembershipID(org.apache.geode.internal.cache.tier.sockets.ClientProxyMembershipID) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) ByteArrayInputStream(java.io.ByteArrayInputStream) GMSMember(org.apache.geode.distributed.internal.membership.gms.GMSMember) HeapDataOutputStream(org.apache.geode.internal.HeapDataOutputStream) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) DataInputStream(java.io.DataInputStream) VersionedDataInputStream(org.apache.geode.internal.VersionedDataInputStream) UUID(org.jgroups.util.UUID) ClientServerTest(org.apache.geode.test.junit.categories.ClientServerTest) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
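
The wrap-then-read move the test exercises generalizes to any payload whose sender's version is known: wrapping the bytes in a VersionedDataInputStream lets version-aware fromData implementations branch on the sender's on-wire format. A hedged helper sketch (the method name and generic signature are ours, not a Geode API):

static <T> T readFromPeer(byte[] bytes, Version peerVersion)
        throws IOException, ClassNotFoundException {
    // the stream carries the peer's version alongside the raw bytes
    DataInputStream in = new VersionedDataInputStream(
        new ByteArrayInputStream(bytes), peerVersion);
    return DataSerializer.readObject(in);
}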

Aggregations

HeapDataOutputStream (org.apache.geode.internal.HeapDataOutputStream): 134 uses
Test (org.junit.Test): 55 uses
IOException (java.io.IOException): 40 uses
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 36 uses
SerializationTest (org.apache.geode.test.junit.categories.SerializationTest): 33 uses
DataInputStream (java.io.DataInputStream): 29 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 23 uses
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 15 uses
DiskAccessException (org.apache.geode.cache.DiskAccessException): 12 uses
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 11 uses
PdxSerializerObject (org.apache.geode.internal.PdxSerializerObject): 10 uses
JsonGenerator (com.fasterxml.jackson.core.JsonGenerator): 8 uses
Version (org.apache.geode.internal.Version): 8 uses
DataInput (java.io.DataInput): 7 uses
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 7 uses
OutputStream (java.io.OutputStream): 6 uses
Properties (java.util.Properties): 6 uses
ByteBuffer (java.nio.ByteBuffer): 5 uses
HashMap (java.util.HashMap): 5 uses
InternalGemFireException (org.apache.geode.InternalGemFireException): 5 uses