
Example 51 with GenericDatumWriter

use of org.apache.avro.generic.GenericDatumWriter in project cdap by caskdata.

the class AvroRecordFormatTest method toStreamEvent.

private StreamEvent toStreamEvent(GenericRecord record, boolean writeSchema) throws IOException {
    // Serialize the record with Avro binary encoding
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    DatumWriter<GenericRecord> writer = new GenericDatumWriter<>(record.getSchema());
    writer.write(record, encoder);
    encoder.flush();
    out.close();
    byte[] serializedRecord = out.toByteArray();
    // Optionally carry the writer schema (and its md5 hash) in the event headers
    String schemaString = record.getSchema().toString();
    Map<String, String> headers = Maps.newHashMap();
    if (writeSchema) {
        headers.put(AvroRecordFormat.SCHEMA, schemaString);
        headers.put(AvroRecordFormat.SCHEMA_HASH, Hashing.md5().hashString(schemaString, Charsets.UTF_8).toString());
    }
    return new StreamEvent(headers, ByteBuffer.wrap(serializedRecord));
}
Also used : BinaryEncoder(org.apache.avro.io.BinaryEncoder) StreamEvent(co.cask.cdap.api.flow.flowlet.StreamEvent) ByteArrayOutputStream(java.io.ByteArrayOutputStream) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) GenericRecord(org.apache.avro.generic.GenericRecord)
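
The reverse path is symmetric: the schema header written by toStreamEvent lets a reader reconstruct the record. Below is a minimal decode sketch, not CDAP API; the helper name fromStreamEvent is illustrative, and only AvroRecordFormat.SCHEMA comes from the example above.

private GenericRecord fromStreamEvent(StreamEvent event) throws IOException {
    // Recover the writer schema from the header, then decode the binary body
    Schema schema = new Schema.Parser().parse(event.getHeaders().get(AvroRecordFormat.SCHEMA));
    ByteBuffer body = event.getBody();
    byte[] bytes = new byte[body.remaining()];
    body.duplicate().get(bytes);
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
    return new GenericDatumReader<GenericRecord>(schema).read(null, decoder);
}
Also used : Schema(org.apache.avro.Schema) BinaryDecoder(org.apache.avro.io.BinaryDecoder) DecoderFactory(org.apache.avro.io.DecoderFactory) GenericDatumReader(org.apache.avro.generic.GenericDatumReader)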

Example 52 with GenericDatumWriter

use of org.apache.avro.generic.GenericDatumWriter in project cdap by caskdata.

the class ClientMessagingService method encodeRollbackDetail.

/**
 * Encodes the given {@link RollbackDetail} as expected by the rollback call. This method is rarely used,
 * as the call to {@link #rollback(TopicId, RollbackDetail)} expects a {@link ClientRollbackDetail} which
 * already contains the encoded bytes.
 *
 * This method looks very similar to {@code StoreHandler.encodeRollbackDetail}, but the two are kept
 * separate intentionally, so that client-side classes can be moved to a separate module without any
 * dependency on the server side (a shared util method in a common module would also work, but is
 * overkill for a method this simple for now).
 */
private ByteBuffer encodeRollbackDetail(RollbackDetail rollbackDetail) throws IOException {
    // Constructs the response object as GenericRecord
    Schema schema = Schemas.V1.PublishResponse.SCHEMA;
    GenericRecord record = new GenericData.Record(schema);
    record.put("transactionWritePointer", rollbackDetail.getTransactionWritePointer());
    GenericRecord rollbackRange = new GenericData.Record(schema.getField("rollbackRange").schema());
    rollbackRange.put("startTimestamp", rollbackDetail.getStartTimestamp());
    rollbackRange.put("startSequenceId", rollbackDetail.getStartSequenceId());
    rollbackRange.put("endTimestamp", rollbackDetail.getEndTimestamp());
    rollbackRange.put("endSequenceId", rollbackDetail.getEndSequenceId());
    record.put("rollbackRange", rollbackRange);
    ExposedByteArrayOutputStream os = new ExposedByteArrayOutputStream();
    Encoder encoder = EncoderFactory.get().directBinaryEncoder(os, null);
    // Use the same PublishResponse schema the record was built with; a mismatched writer
    // schema (e.g. PublishRequest) would produce a corrupt or failing encoding
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    datumWriter.write(record, encoder);
    return os.toByteBuffer();
}
Also used : Encoder(org.apache.avro.io.Encoder) Schema(org.apache.avro.Schema) GenericRecord(org.apache.avro.generic.GenericRecord) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter)
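
For orientation, here is a hypothetical stand-in for Schemas.V1.PublishResponse.SCHEMA, reconstructed from the fields populated above and from the size comment in the StoreHandler variant below; the field types and union ordering are assumptions, not CDAP's actual definition.

// Hypothetical reconstruction of the PublishResponse schema (illustration only)
static final Schema PUBLISH_RESPONSE_SCHEMA = new Schema.Parser().parse(
    "{\"type\":\"record\",\"name\":\"PublishResponse\",\"fields\":["
    + "{\"name\":\"transactionWritePointer\",\"type\":[\"long\",\"null\"]},"
    + "{\"name\":\"rollbackRange\",\"type\":{\"type\":\"record\",\"name\":\"RollbackRange\",\"fields\":["
    + "{\"name\":\"startTimestamp\",\"type\":\"long\"},"
    + "{\"name\":\"startSequenceId\",\"type\":\"int\"},"
    + "{\"name\":\"endTimestamp\",\"type\":\"long\"},"
    + "{\"name\":\"endSequenceId\",\"type\":\"int\"}]}}]}");
Also used : Schema(org.apache.avro.Schema)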

Example 53 with GenericDatumWriter

use of org.apache.avro.generic.GenericDatumWriter in project cdap by caskdata.

the class StoreHandler method encodeRollbackDetail.

/**
 * Encodes the {@link RollbackDetail} object as an Avro record based on the {@link Schemas.V1.PublishResponse#SCHEMA}.
 */
private ByteBuf encodeRollbackDetail(RollbackDetail rollbackDetail) throws IOException {
    Schema schema = Schemas.V1.PublishResponse.SCHEMA;
    // Constructs the response object as GenericRecord
    GenericRecord response = new GenericData.Record(schema);
    response.put("transactionWritePointer", rollbackDetail.getTransactionWritePointer());
    GenericRecord rollbackRange = new GenericData.Record(schema.getField("rollbackRange").schema());
    rollbackRange.put("startTimestamp", rollbackDetail.getStartTimestamp());
    rollbackRange.put("startSequenceId", rollbackDetail.getStartSequenceId());
    rollbackRange.put("endTimestamp", rollbackDetail.getEndTimestamp());
    rollbackRange.put("endSequenceId", rollbackDetail.getEndSequenceId());
    response.put("rollbackRange", rollbackRange);
    // For V1 PublishResponse, the payload is a union(long, null) followed by 2 longs and 2 ints.
    // In Avro binary encoding the union index takes 1 byte, a long takes at most 10 bytes and an
    // int at most 5 bytes (zig-zag varints), so 38 bytes is a good initial capacity; the ByteBuf
    // grows automatically in the rare worst case.
    ByteBuf buffer = Unpooled.buffer(38);
    Encoder encoder = EncoderFactory.get().directBinaryEncoder(new ByteBufOutputStream(buffer), null);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    datumWriter.write(response, encoder);
    return buffer;
}
Also used : ByteBufOutputStream(io.netty.buffer.ByteBufOutputStream) Encoder(org.apache.avro.io.Encoder) Schema(org.apache.avro.Schema) GenericRecord(org.apache.avro.generic.GenericRecord) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) ByteBuf(io.netty.buffer.ByteBuf)
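
The size arithmetic in the comment can be verified empirically: encode a worst-case record and measure it. A standalone sketch against the hypothetical PUBLISH_RESPONSE_SCHEMA from the previous example (it prints 41, the true upper bound, which is why letting the ByteBuf grow matters).

static void printWorstCaseEncodedSize() throws IOException {
    GenericRecord response = new GenericData.Record(PUBLISH_RESPONSE_SCHEMA);
    // Long.MIN_VALUE / Integer.MIN_VALUE maximize the zig-zag varint lengths
    response.put("transactionWritePointer", Long.MIN_VALUE);
    GenericRecord range = new GenericData.Record(PUBLISH_RESPONSE_SCHEMA.getField("rollbackRange").schema());
    range.put("startTimestamp", Long.MIN_VALUE);
    range.put("startSequenceId", Integer.MIN_VALUE);
    range.put("endTimestamp", Long.MIN_VALUE);
    range.put("endSequenceId", Integer.MIN_VALUE);
    response.put("rollbackRange", range);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Encoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
    new GenericDatumWriter<GenericRecord>(PUBLISH_RESPONSE_SCHEMA).write(response, encoder);
    encoder.flush();
    // 1 (union index) + 3 longs * 10 bytes + 2 ints * 5 bytes = 41
    System.out.println("worst-case encoded size = " + out.size());
}
Also used : ByteArrayOutputStream(java.io.ByteArrayOutputStream) GenericData(org.apache.avro.generic.GenericData) GenericRecord(org.apache.avro.generic.GenericRecord) Encoder(org.apache.avro.io.Encoder) EncoderFactory(org.apache.avro.io.EncoderFactory) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter)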

Example 54 with GenericDatumWriter

use of org.apache.avro.generic.GenericDatumWriter in project storm by apache.

the class AbstractAvroSerializer method write.

@Override
public void write(Kryo kryo, Output output, GenericContainer record) {
    String fingerPrint = this.getFingerprint(record.getSchema());
    output.writeString(fingerPrint);
    GenericDatumWriter<GenericContainer> writer = new GenericDatumWriter<>(record.getSchema());
    BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(output, null);
    try {
        writer.write(record, encoder);
        // the direct encoder writes straight through to the Kryo output, but an
        // explicit flush keeps the encoder contract clear
        encoder.flush();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used : BinaryEncoder(org.apache.avro.io.BinaryEncoder) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) IOException(java.io.IOException) GenericContainer(org.apache.avro.generic.GenericContainer)
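
The matching Kryo read side resolves the schema from the fingerprint and decodes with a GenericDatumReader. Here is a sketch assuming a getSchema(fingerPrint) lookup that inverts getFingerprint; the excerpt above does not show Storm's actual read method, so treat the details as illustrative.

@Override
public GenericContainer read(Kryo kryo, Input input, Class<GenericContainer> someClass) {
    // Resolve the writer schema from the fingerprint written first on the wire
    Schema schema = this.getSchema(input.readString());
    GenericDatumReader<GenericContainer> reader = new GenericDatumReader<>(schema);
    BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(input, null);
    try {
        return reader.read(null, decoder);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used : Kryo(com.esotericsoftware.kryo.Kryo) Input(com.esotericsoftware.kryo.io.Input) Schema(org.apache.avro.Schema) BinaryDecoder(org.apache.avro.io.BinaryDecoder) DecoderFactory(org.apache.avro.io.DecoderFactory) GenericDatumReader(org.apache.avro.generic.GenericDatumReader)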

Example 55 with GenericDatumWriter

use of org.apache.avro.generic.GenericDatumWriter in project databus by linkedin.

the class GoldenGateEventProducer method addEventToBuffer.

/**
 * @param dbUpdates  The dbUpdates present in the current transaction
 * @param ti The meta information about the transaction. (See TransactionInfo class for more details).
 * @throws DatabusException
 * @throws UnsupportedKeyException
 */
protected void addEventToBuffer(List<TransactionState.PerSourceTransactionalUpdate> dbUpdates, TransactionInfo ti) throws DatabusException, UnsupportedKeyException {
    if (dbUpdates.size() == 0)
        throw new DatabusException("Cannot handle empty dbUpdates");
    long scn = ti.getScn();
    long timestamp = ti.getTransactionTimeStampNs();
    EventSourceStatistics globalStats = getSource(GLOBAL_SOURCE_ID).getStatisticsBean();
    /**
     * We skip the start SCN of the relay because we have already added an EOP for this SCN in the buffer.
     * Why is this not a problem? There are two cases:
     * 1. We use the earliest/latest SCN when there is no maxScn (there is no real start point), so it's OK to miss the first event.
     * 2. If it's the maxScn, the event was already seen by the relay.
     */
    if (scn == _startPrevScn.get()) {
        _log.info("Skipping this transaction, EOP already sent for this event");
        return;
    }
    getEventBuffer().startEvents();
    int eventsInTransactionCount = 0;
    List<EventReaderSummary> summaries = new ArrayList<EventReaderSummary>();
    for (int i = 0; i < dbUpdates.size(); ++i) {
        GenericRecord record = null;
        TransactionState.PerSourceTransactionalUpdate perSourceUpdate = dbUpdates.get(i);
        short sourceId = (short) perSourceUpdate.getSourceId();
        // prepare stats collection per source
        EventSourceStatistics perSourceStats = getSource(sourceId).getStatisticsBean();
        Iterator<DbUpdateState.DBUpdateImage> dbUpdateIterator = perSourceUpdate.getDbUpdatesSet().iterator();
        int eventsInDbUpdate = 0;
        long dbUpdatesEventsSize = 0;
        long startDbUpdatesMs = System.currentTimeMillis();
        // TODO verify whether there is any case where we need to roll back.
        while (dbUpdateIterator.hasNext()) {
            DbUpdateState.DBUpdateImage dbUpdate = dbUpdateIterator.next();
            // Construct the Databus Event key, determine the key type and construct the key
            Object keyObj = obtainKey(dbUpdate);
            DbusEventKey eventKey = new DbusEventKey(keyObj);
            // Get the logical partition id
            PartitionFunction partitionFunction = _partitionFunctionHashMap.get((int) sourceId);
            short lPartitionId = partitionFunction.getPartition(eventKey);
            record = dbUpdate.getGenericRecord();
            // Write the event to the buffer
            if (record == null)
                throw new DatabusException("Cannot write event to buffer because record = " + record);
            if (record.getSchema() == null)
                throw new DatabusException("The record does not have a schema (null schema)");
            try {
                // Collect stats on number of dbUpdates for one source
                eventsInDbUpdate++;
                // Count of all the events in the current transaction
                eventsInTransactionCount++;
                // Serialize the row (Avro 1.4-era API; with newer Avro versions this would be
                // EncoderFactory.get().binaryEncoder(bos, null) followed by encoder.flush())
                ByteArrayOutputStream bos = new ByteArrayOutputStream();
                Encoder encoder = new BinaryEncoder(bos);
                GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(record.getSchema());
                writer.write(record, encoder);
                byte[] serializedValue = bos.toByteArray();
                // Get the md5 for the schema
                SchemaId schemaId = SchemaId.createWithMd5(dbUpdate.getSchema());
                // Determine the operation type and convert to dbus opcode
                DbusOpcode opCode;
                if (dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.INSERT || dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.UPDATE) {
                    opCode = DbusOpcode.UPSERT;
                    if (_log.isDebugEnabled())
                        _log.debug("The event with scn " + scn + " is INSERT/UPDATE");
                } else if (dbUpdate.getOpType() == DbUpdateState.DBUpdateImage.OpType.DELETE) {
                    opCode = DbusOpcode.DELETE;
                    if (_log.isDebugEnabled())
                        _log.debug("The event with scn " + scn + " is DELETE");
                } else {
                    throw new DatabusException("Unknown opcode from dbUpdate for event with scn:" + scn);
                }
                // Construct the dbusEvent info
                DbusEventInfo dbusEventInfo = new DbusEventInfo(opCode, scn, (short) _pConfig.getId(), lPartitionId, timestamp, sourceId, schemaId.getByteArray(), serializedValue, false, false);
                dbusEventInfo.setReplicated(dbUpdate.isReplicated());
                perSourceStats.addEventCycle(1, ti.getTransactionTimeRead(), serializedValue.length, scn);
                globalStats.addEventCycle(1, ti.getTransactionTimeRead(), serializedValue.length, scn);
                long tsEnd = System.currentTimeMillis();
                perSourceStats.addTimeOfLastDBAccess(tsEnd);
                globalStats.addTimeOfLastDBAccess(tsEnd);
                // Append to the event buffer
                getEventBuffer().appendEvent(eventKey, dbusEventInfo, _statsCollector);
                _rc.incrementEventCount();
                dbUpdatesEventsSize += serializedValue.length;
            } catch (IOException io) {
                perSourceStats.addError();
                globalStats.addEmptyEventCycle();
                _log.error("Cannot create byte stream payload: " + dbUpdates.get(i).getSourceId());
            }
        }
        long endDbUpdatesMs = System.currentTimeMillis();
        long dbUpdatesElapsedTimeMs = endDbUpdatesMs - startDbUpdatesMs;
        // Log Event Summary at logical source level
        EventReaderSummary summary = new EventReaderSummary(sourceId, _monitoredSources.get(sourceId).getSourceName(), scn, eventsInDbUpdate, dbUpdatesEventsSize, -1L, /* not supported */ dbUpdatesElapsedTimeMs, timestamp, timestamp, -1L);
        if (_eventsLog.isInfoEnabled()) {
            _eventsLog.info(summary.toString());
        }
        summaries.add(summary);
        if (_log.isDebugEnabled())
            _log.debug("There are " + eventsInDbUpdate + " events seen in the current dbUpdate");
    }
    // Log Event Summary at Physical source level
    ReadEventCycleSummary summary = new ReadEventCycleSummary(_pConfig.getName(), summaries, scn, -1);
    if (_eventsLog.isInfoEnabled()) {
        _eventsLog.info(summary.toString());
    }
    _log.info("Writing " + eventsInTransactionCount + " events from transaction with scn: " + scn);
    if (scn <= 0)
        throw new DatabusException("Unable to write events to buffer because of negative/zero scn: " + scn);
    getEventBuffer().endEvents(scn, _statsCollector);
    _scn.set(scn);
    if (getMaxScnReaderWriter() != null) {
        try {
            getMaxScnReaderWriter().saveMaxScn(_scn.get());
        } catch (DatabusException e) {
            _log.error("Cannot save scn = " + _scn + " for physical source = " + getName(), e);
        }
    }
}
Also used : PartitionFunction(com.linkedin.databus2.producers.PartitionFunction) TransactionState(com.linkedin.databus2.ggParser.XmlStateMachine.TransactionState) ArrayList(java.util.ArrayList) EventSourceStatistics(com.linkedin.databus.monitoring.mbean.EventSourceStatistics) DbusEventInfo(com.linkedin.databus.core.DbusEventInfo) ReadEventCycleSummary(com.linkedin.databus2.producers.db.ReadEventCycleSummary) EventReaderSummary(com.linkedin.databus2.producers.db.EventReaderSummary) Encoder(org.apache.avro.io.Encoder) BinaryEncoder(org.apache.avro.io.BinaryEncoder) DbusOpcode(com.linkedin.databus.core.DbusOpcode) GenericRecord(org.apache.avro.generic.GenericRecord) DbUpdateState(com.linkedin.databus2.ggParser.XmlStateMachine.DbUpdateState) ByteArrayOutputStream(java.io.ByteArrayOutputStream) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) IOException(java.io.IOException) DatabusException(com.linkedin.databus2.core.DatabusException) SchemaId(com.linkedin.databus2.schemas.SchemaId) PerSourceTransactionalUpdate(com.linkedin.databus2.ggParser.XmlStateMachine.TransactionState.PerSourceTransactionalUpdate) DbusEventKey(com.linkedin.databus.core.DbusEventKey)
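
On the consumer side, the md5 SchemaId carried in each DbusEventInfo identifies the writer schema; once that schema has been resolved (the registry lookup is outside this excerpt), decoding the payload is the standard GenericDatumReader sequence. A minimal sketch; decodeRow is a hypothetical helper, not Databus API.

static GenericRecord decodeRow(Schema writerSchema, byte[] serializedValue) throws IOException {
    // Decode the bytes produced by the GenericDatumWriter in addEventToBuffer
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(serializedValue, null);
    return new GenericDatumReader<GenericRecord>(writerSchema).read(null, decoder);
}
Also used : Schema(org.apache.avro.Schema) BinaryDecoder(org.apache.avro.io.BinaryDecoder) DecoderFactory(org.apache.avro.io.DecoderFactory) GenericDatumReader(org.apache.avro.generic.GenericDatumReader)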

Aggregations

GenericDatumWriter (org.apache.avro.generic.GenericDatumWriter): 127
GenericRecord (org.apache.avro.generic.GenericRecord): 105
Schema (org.apache.avro.Schema): 69
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 57
DataFileWriter (org.apache.avro.file.DataFileWriter): 47
File (java.io.File): 40
Test (org.junit.Test): 37
IOException (java.io.IOException): 29
BinaryEncoder (org.apache.avro.io.BinaryEncoder): 29
MockFlowFile (org.apache.nifi.util.MockFlowFile): 25
Encoder (org.apache.avro.io.Encoder): 23
TestRunner (org.apache.nifi.util.TestRunner): 20
HashMap (java.util.HashMap): 14
ByteArrayOutputStream (org.apache.nifi.stream.io.ByteArrayOutputStream): 14
GenericData (org.apache.avro.generic.GenericData): 12
ByteArrayInputStream (java.io.ByteArrayInputStream): 11
FileOutputStream (java.io.FileOutputStream): 10
InputStream (java.io.InputStream): 9
ArrayList (java.util.ArrayList): 8
GenericDatumReader (org.apache.avro.generic.GenericDatumReader): 8