Use of org.apache.avro.io.Encoder in project voldemort by voldemort.
The toBytes method of class AvroVersionedGenericSerializer:
public byte[] toBytes(Object object) {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    Encoder encoder = new BinaryEncoder(output);
    GenericDatumWriter<Object> datumWriter = null;
    // Prefix the payload with the newest schema version so that readers
    // can pick the matching schema when deserializing.
    output.write(newestVersion.byteValue());
    try {
        datumWriter = new GenericDatumWriter<Object>(typeDef);
        datumWriter.write(object, encoder);
        encoder.flush();
    } catch (SerializationException sE) {
        throw sE;
    } catch (IOException e) {
        throw new SerializationException(e);
    } catch (Exception aIOBE) {
        // The object was probably not created using the latest schema.
        // Fall back to the writer's own (older) schema version and
        // serialize with that instead.
        Schema writer = ((GenericContainer) object).getSchema();
        Integer writerVersion = getSchemaVersion(writer);
        return toBytes(object, writer, writerVersion);
    } finally {
        SerializationUtils.close(output);
    }
    return output.toByteArray();
}
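The version byte written first is what lets a reader pick the matching writer schema. Below is a minimal decode-side sketch; the schemaVersions map and the method shape are assumptions for illustration, not Voldemort's actual API, and it uses the current DecoderFactory API rather than the Avro 1.4-era constructors the excerpt relies on.

import java.io.IOException;
import java.util.Map;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.io.DecoderFactory;

// Hypothetical decode counterpart to the versioned toBytes() above.
public Object toObject(byte[] bytes, Map<Integer, Schema> schemaVersions, Schema readerSchema)
        throws IOException {
    // The first byte is the schema version that toBytes() prepended.
    Schema writerSchema = schemaVersions.get((int) bytes[0]);
    // Resolve the rest of the payload from the writer schema to the reader schema.
    GenericDatumReader<Object> reader = new GenericDatumReader<Object>(writerSchema, readerSchema);
    return reader.read(null, DecoderFactory.get().binaryDecoder(bytes, 1, bytes.length - 1, null));
}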
Use of org.apache.avro.io.Encoder in project voldemort by voldemort.
The toBytes method of class AvroReflectiveSerializer:
public byte[] toBytes(T object) {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    Encoder encoder = new BinaryEncoder(output);
    ReflectDatumWriter<T> datumWriter = null;
    try {
        datumWriter = new ReflectDatumWriter<T>(clazz);
        datumWriter.write(object, encoder);
        encoder.flush();
    } catch (IOException e) {
        throw new SerializationException(e);
    } finally {
        SerializationUtils.close(output);
    }
    return output.toByteArray();
}
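For context, the reflect-based pattern above can be exercised end to end. A self-contained round-trip sketch follows; the Member POJO and its field values are invented for illustration, and the encoder comes from EncoderFactory rather than the older BinaryEncoder constructor the excerpt uses.

import java.io.ByteArrayOutputStream;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.reflect.ReflectDatumReader;
import org.apache.avro.reflect.ReflectDatumWriter;

public final class ReflectRoundTrip {

    // Invented POJO; Avro's reflect API derives the schema from its fields.
    public static class Member {
        public String name;
        public int age;
    }

    public static void main(String[] args) throws Exception {
        Member in = new Member();
        in.name = "alice";
        in.age = 30;

        // Serialize, mirroring AvroReflectiveSerializer.toBytes().
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
        new ReflectDatumWriter<>(Member.class).write(in, encoder);
        encoder.flush();

        // Deserialize with the matching reflect reader.
        Member back = new ReflectDatumReader<>(Member.class)
                .read(null, DecoderFactory.get().binaryDecoder(out.toByteArray(), null));
        System.out.println(back.name + " " + back.age); // alice 30
    }
}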
Use of org.apache.avro.io.Encoder in project cdap by caskdata.
The encodeRollbackDetail method of class ClientMessagingService:
/**
 * Encodes the given {@link RollbackDetail} as expected by the rollback call. This method is rarely used,
 * as the call to {@link #rollback(TopicId, RollbackDetail)} expects a {@link ClientRollbackDetail},
 * which already contains the encoded bytes.
 *
 * This method looks very similar to {@code StoreHandler.encodeRollbackDetail}, but the two are kept
 * separate intentionally: it allows the client-side classes to be moved to a separate module without
 * any dependency on the server side. (This could also be done with a util method in a common module,
 * but that is overkill for such a simple method for now.)
 */
private ByteBuffer encodeRollbackDetail(RollbackDetail rollbackDetail) throws IOException {
    // Constructs the response object as GenericRecord
    Schema schema = Schemas.V1.PublishResponse.SCHEMA;
    GenericRecord record = new GenericData.Record(schema);
    record.put("transactionWritePointer", rollbackDetail.getTransactionWritePointer());
    GenericRecord rollbackRange = new GenericData.Record(schema.getField("rollbackRange").schema());
    rollbackRange.put("startTimestamp", rollbackDetail.getStartTimestamp());
    rollbackRange.put("startSequenceId", rollbackDetail.getStartSequenceId());
    rollbackRange.put("endTimestamp", rollbackDetail.getEndTimestamp());
    rollbackRange.put("endSequenceId", rollbackDetail.getEndSequenceId());
    record.put("rollbackRange", rollbackRange);
    ExposedByteArrayOutputStream os = new ExposedByteArrayOutputStream();
    Encoder encoder = EncoderFactory.get().directBinaryEncoder(os, null);
    // Write with the same schema the record was constructed with.
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    datumWriter.write(record, encoder);
    return os.toByteBuffer();
}
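Decoding such a payload is symmetric: resolve the bytes against the same schema with a direct binary decoder. A hedged sketch follows; the method name and shape are illustrative, not part of ClientMessagingService, and it assumes access to Schemas.V1.PublishResponse.SCHEMA.

import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DecoderFactory;

// Illustrative decode counterpart to encodeRollbackDetail().
private GenericRecord decodeRollbackDetail(InputStream is) throws IOException {
    GenericDatumReader<GenericRecord> reader =
        new GenericDatumReader<>(Schemas.V1.PublishResponse.SCHEMA);
    GenericRecord record = reader.read(null, DecoderFactory.get().directBinaryDecoder(is, null));
    // Fields are then available via record.get("transactionWritePointer") (a union(long, null),
    // so possibly null) and record.get("rollbackRange").
    return record;
}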
Use of org.apache.avro.io.Encoder in project cdap by caskdata.
The encodeRollbackDetail method of class StoreHandler:
/**
 * Encodes the {@link RollbackDetail} object as an Avro record based on the
 * {@link Schemas.V1.PublishResponse#SCHEMA}.
 */
private ChannelBuffer encodeRollbackDetail(RollbackDetail rollbackDetail) throws IOException {
    Schema schema = Schemas.V1.PublishResponse.SCHEMA;
    // Constructs the response object as GenericRecord
    GenericRecord response = new GenericData.Record(schema);
    response.put("transactionWritePointer", rollbackDetail.getTransactionWritePointer());
    GenericRecord rollbackRange = new GenericData.Record(schema.getField("rollbackRange").schema());
    rollbackRange.put("startTimestamp", rollbackDetail.getStartTimestamp());
    rollbackRange.put("startSequenceId", rollbackDetail.getStartSequenceId());
    rollbackRange.put("endTimestamp", rollbackDetail.getEndTimestamp());
    rollbackRange.put("endSequenceId", rollbackDetail.getEndSequenceId());
    response.put("rollbackRange", rollbackRange);
    // Initial size estimate for the V1 PublishResponse payload: a union(long, null)
    // plus two more longs and two ints. In Avro binary encoding the union tag takes
    // 1 byte, a varint-encoded long up to 10 bytes, and an int up to 5, so 38 is an
    // estimate rather than a strict bound; the dynamic buffer grows on demand.
    ChannelBuffer buffer = ChannelBuffers.dynamicBuffer(38);
    Encoder encoder = EncoderFactory.get().directBinaryEncoder(new ChannelBufferOutputStream(buffer), null);
    DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    datumWriter.write(response, encoder);
    return buffer;
}
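Since the 38 above is only an initial capacity, it can be useful to see how many bytes Avro's zig-zag varint encoding actually uses per value. A small standalone probe (illustrative only, not part of StoreHandler):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;

// Measures the encoded size of a single long under Avro's binary encoding.
static int encodedSize(long value) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
    encoder.writeLong(value);
    encoder.flush();
    return out.size();
}

// encodedSize(System.currentTimeMillis()) -> 6  (a millisecond timestamp)
// encodedSize(-1L)                        -> 1  (zig-zag favors small magnitudes)
// encodedSize(Long.MAX_VALUE)             -> 10 (worst case for a long)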
Use of org.apache.avro.io.Encoder in project storm by apache.
The write method of class AvroSerializer:
@Override
public ByteBuffer write(List<Object> data, ByteBuffer buffer) {
    Preconditions.checkArgument(data != null && data.size() == fieldNames.size(), "Invalid schemas");
    try {
        Schema schema = schemas.getSchema(schemaString);
        GenericRecord record = new GenericData.Record(schema);
        for (int i = 0; i < fieldNames.size(); i++) {
            record.put(fieldNames.get(i), data.get(i));
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        DatumWriter<GenericRecord> writer = new GenericDatumWriter<>(record.getSchema());
        Encoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
        writer.write(record, encoder);
        encoder.flush();
        byte[] bytes = out.toByteArray();
        out.close();
        return ByteBuffer.wrap(bytes);
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
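A matching read path simply decodes the record and maps the named fields back into a positional list. A sketch under the assumption that the reader holds the same schema and fieldNames the serializer used (not part of Storm's AvroSerializer):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DecoderFactory;

// Illustrative inverse of write(): decode the record, then pull each named field
// back into its original tuple position.
public List<Object> read(ByteBuffer buffer, Schema schema, List<String> fieldNames) throws IOException {
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
    GenericRecord record = reader.read(null, DecoderFactory.get().binaryDecoder(bytes, null));
    List<Object> data = new ArrayList<>(fieldNames.size());
    for (String name : fieldNames) {
        data.add(record.get(name));
    }
    return data;
}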