Use of com.google.flatbuffers.FlatBufferBuilder in the project rtree by davidmoten: the class SerializerFlatBuffers, method write.
@Override
public void write(RTree<T, S> tree, OutputStream os) throws IOException {
    // Serializes the R-tree into a FlatBuffer and streams the raw bytes to os.
    FlatBufferBuilder builder = new FlatBufferBuilder();
    // Use the root's minimum bounding rectangle; for an empty tree fall back
    // to a degenerate rectangle (its value is never read in that case).
    final Rectangle bounds = tree.root().isPresent()
            ? tree.root().get().geometry().mbr()
            : Geometries.rectangle(0, 0, 0, 0);
    // Child objects must be finished before the table that references them.
    int boundsOffset = toBounds(builder, bounds);
    Context_.startContext_(builder);
    Context_.addBounds(builder, boundsOffset);
    Context_.addMinChildren(builder, tree.context().minChildren());
    Context_.addMaxChildren(builder, tree.context().maxChildren());
    int contextOffset = Context_.endContext_(builder);
    final int rootOffset;
    if (tree.root().isPresent()) {
        rootOffset = addNode(tree.root().get(), builder, factory.serializer());
    } else {
        rootOffset = 0; // never referenced for an empty tree
    }
    Tree_.startTree_(builder);
    Tree_.addContext(builder, contextOffset);
    Tree_.addSize(builder, tree.size());
    if (tree.size() > 0) {
        Tree_.addRoot(builder, rootOffset);
    }
    int treeOffset = Tree_.endTree_(builder);
    Tree_.finishTree_Buffer(builder, treeOffset);
    // FlatBufferBuilder fills its backing array back-to-front: the finished
    // message occupies [position, limit) of the underlying array.
    ByteBuffer bb = builder.dataBuffer();
    os.write(bb.array(), bb.position(), bb.remaining());
}
Use of com.google.flatbuffers.FlatBufferBuilder in the project deephaven-core by deephaven: the class BarrageStreamGenerator, method getSnapshotMetadata.
/**
 * Builds the BarrageUpdateMetadata flatbuffer for a snapshot and wraps it in a
 * BarrageMessageWrapper flatbuffer for transmission.
 *
 * @param view the snapshot view describing the viewport/columns being sent
 * @return a ByteBuffer positioned at the finished wrapper message; note the
 *         returned slice shares its backing storage with the header builder
 */
private ByteBuffer getSnapshotMetadata(final SnapshotView view) throws IOException {
final FlatBufferBuilder metadata = new FlatBufferBuilder();
// All referenced child objects (viewport, column set, row sets) must be
// written to the builder before startBarrageUpdateMetadata is called,
// since FlatBuffers tables cannot be nested during construction.
int effectiveViewportOffset = 0;
if (view.isViewport()) {
try (final RowSetGenerator viewportGen = new RowSetGenerator(view.viewport)) {
effectiveViewportOffset = viewportGen.addToFlatBuffer(metadata);
}
}
int effectiveColumnSetOffset = 0;
if (view.subscribedColumns != null) {
effectiveColumnSetOffset = new BitSetGenerator(view.subscribedColumns).addToFlatBuffer(metadata);
}
final int rowsAddedOffset = rowsAdded.addToFlatBuffer(metadata);
// no shifts in a snapshot, but need to provide a valid structure
final int shiftDataOffset = shifted.addToFlatBuffer(metadata);
// Added Chunk Data: only viewport snapshots restrict which rows are included
int addedRowsIncludedOffset = 0;
if (view.isViewport()) {
addedRowsIncludedOffset = rowsIncluded.addToFlatBuffer(view.keyspaceViewport, metadata);
}
BarrageUpdateMetadata.startBarrageUpdateMetadata(metadata);
// number of add batches = ceil(numAddRows / batchSize), guarded against int overflow
BarrageUpdateMetadata.addNumAddBatches(metadata, LongSizedDataStructure.intSize("BarrageStreamGenerator", (view.numAddRows + view.batchSize() - 1) / view.batchSize()));
// snapshots carry no modifications, removals, or mod-column data (offsets 0)
BarrageUpdateMetadata.addNumModBatches(metadata, 0);
BarrageUpdateMetadata.addIsSnapshot(metadata, isSnapshot);
BarrageUpdateMetadata.addFirstSeq(metadata, firstSeq);
BarrageUpdateMetadata.addLastSeq(metadata, lastSeq);
BarrageUpdateMetadata.addEffectiveViewport(metadata, effectiveViewportOffset);
BarrageUpdateMetadata.addEffectiveColumnSet(metadata, effectiveColumnSetOffset);
BarrageUpdateMetadata.addAddedRows(metadata, rowsAddedOffset);
BarrageUpdateMetadata.addRemovedRows(metadata, 0);
BarrageUpdateMetadata.addShiftData(metadata, shiftDataOffset);
BarrageUpdateMetadata.addAddedRowsIncluded(metadata, addedRowsIncludedOffset);
BarrageUpdateMetadata.addModColumnNodes(metadata, 0);
BarrageUpdateMetadata.addEffectiveReverseViewport(metadata, view.reverseViewport);
metadata.finish(BarrageUpdateMetadata.endBarrageUpdateMetadata(metadata));
// Wrap the finished metadata buffer in a BarrageMessageWrapper so the
// receiver can identify the payload type from the magic and msgType fields.
final FlatBufferBuilder header = new FlatBufferBuilder();
final int payloadOffset = BarrageMessageWrapper.createMsgPayloadVector(header, metadata.dataBuffer());
BarrageMessageWrapper.startBarrageMessageWrapper(header);
BarrageMessageWrapper.addMagic(header, BarrageUtil.FLATBUFFER_MAGIC);
BarrageMessageWrapper.addMsgType(header, BarrageMessageType.BarrageUpdateMetadata);
BarrageMessageWrapper.addMsgPayload(header, payloadOffset);
header.finish(BarrageMessageWrapper.endBarrageMessageWrapper(header));
return header.dataBuffer().slice();
}
Use of com.google.flatbuffers.FlatBufferBuilder in the project deephaven-core by deephaven: the class BarrageStreamGenerator, method getInputStream.
/**
 * Returns an InputStream of the message filtered to the viewport. This function accepts `targetBatchSize` but may
 * actually write fewer rows than the target (when crossing an internal chunk boundary, e.g.).
 * The result is assembled as a sequence of drainable streams: the protobuf-framed
 * Flight header first, then the payload column streams collected by {@code columnVisitor}.
 *
 * @param view the view of the overall chunk to generate a RecordBatch for
 * @param offset the start of the batch in position space w.r.t. the view (inclusive)
 * @param targetBatchSize the target (and maximum) batch size to use for this message
 * @param actualBatchSize the number of rows actually sent in this batch (will be <= targetBatchSize)
 * @param metadata the optional flight data metadata to attach to the message
 * @param columnVisitor the helper method responsible for appending the payload columns to the RecordBatch
 * @return an InputStream ready to be drained by GRPC
 */
private InputStream getInputStream(final View view, final long offset, final int targetBatchSize, final MutableInt actualBatchSize, final ByteBuffer metadata, final ColumnVisitor columnVisitor) throws IOException {
final ArrayDeque<InputStream> streams = new ArrayDeque<>();
final MutableInt size = new MutableInt();
// Accumulates payload streams, tracking the running body size; empty
// streams are closed and discarded rather than enqueued.
final Consumer<InputStream> addStream = (final InputStream is) -> {
try {
final int sz = is.available();
if (sz == 0) {
is.close();
return;
}
streams.add(is);
size.add(sz);
} catch (final IOException e) {
throw new UncheckedDeephavenException("Unexpected IOException", e);
}
// Pad each stream out to an 8-byte boundary so buffers stay aligned for
// downstream readers (e.g. the C++ Arrow implementation).
if (size.intValue() % 8 != 0) {
final int paddingBytes = (8 - (size.intValue() % 8));
size.add(paddingBytes);
streams.add(new DrainableByteArrayInputStream(PADDING_BUFFER, 0, paddingBytes));
}
};
final FlatBufferBuilder header = new FlatBufferBuilder();
final long numRows;
final int nodesOffset;
final int buffersOffset;
try (final SizedChunk<Values> nodeOffsets = new SizedChunk<>(ChunkType.Object);
final SizedLongChunk<Values> bufferInfos = new SizedLongChunk<>()) {
nodeOffsets.ensureCapacity(addColumnData.length);
nodeOffsets.get().setSize(0);
// up to three buffers (validity, offsets, data) per column
bufferInfos.ensureCapacity(addColumnData.length * 3);
bufferInfos.get().setSize(0);
final MutableLong totalBufferLength = new MutableLong();
// Records one FieldNode (element count + null count) per column chunk.
final ChunkInputStreamGenerator.FieldNodeListener fieldNodeListener = (numElements, nullCount) -> {
nodeOffsets.ensureCapacityPreserve(nodeOffsets.get().size() + 1);
nodeOffsets.get().asWritableObjectChunk().add(new ChunkInputStreamGenerator.FieldNodeInfo(numElements, nullCount));
};
// Records each buffer length and accumulates the total body length.
final ChunkInputStreamGenerator.BufferListener bufferListener = (length) -> {
totalBufferLength.add(length);
bufferInfos.ensureCapacityPreserve(bufferInfos.get().size() + 1);
bufferInfos.get().add(length);
};
// The visitor both enqueues the payload streams (via addStream) and
// reports node/buffer structure through the two listeners above.
numRows = columnVisitor.visit(view, offset, targetBatchSize, addStream, fieldNodeListener, bufferListener);
actualBatchSize.setValue(numRows);
// FlatBuffers vectors are written back-to-front, hence the reverse iteration.
final WritableChunk<Values> noChunk = nodeOffsets.get();
RecordBatch.startNodesVector(header, noChunk.size());
for (int i = noChunk.size() - 1; i >= 0; --i) {
final ChunkInputStreamGenerator.FieldNodeInfo node = (ChunkInputStreamGenerator.FieldNodeInfo) noChunk.asObjectChunk().get(i);
FieldNode.createFieldNode(header, node.numElements, node.nullCount);
}
nodesOffset = header.endVector();
final WritableLongChunk<Values> biChunk = bufferInfos.get();
RecordBatch.startBuffersVector(header, biChunk.size());
for (int i = biChunk.size() - 1; i >= 0; --i) {
// Walking backwards, subtracting each length yields that buffer's
// starting offset within the message body.
totalBufferLength.subtract(biChunk.get(i));
Buffer.createBuffer(header, totalBufferLength.longValue(), biChunk.get(i));
}
buffersOffset = header.endVector();
}
RecordBatch.startRecordBatch(header);
RecordBatch.addNodes(header, nodesOffset);
RecordBatch.addBuffers(header, buffersOffset);
RecordBatch.addLength(header, numRows);
final int headerOffset = RecordBatch.endRecordBatch(header);
header.finish(MessageHelper.wrapInMessage(header, headerOffset, org.apache.arrow.flatbuf.MessageHeader.RecordBatch, size.intValue()));
// now create the proto header: frame the flatbuffer header, optional app
// metadata, and the body-length tag as a protobuf-encoded FlightData prefix
try (final ExposedByteArrayOutputStream baos = new ExposedByteArrayOutputStream()) {
final CodedOutputStream cos = CodedOutputStream.newInstance(baos);
cos.writeByteBuffer(Flight.FlightData.DATA_HEADER_FIELD_NUMBER, header.dataBuffer().slice());
if (metadata != null) {
cos.writeByteBuffer(Flight.FlightData.APP_METADATA_FIELD_NUMBER, metadata);
}
// Write only the tag and length for the body; the body bytes themselves
// are supplied by the payload streams that follow.
cos.writeTag(Flight.FlightData.DATA_BODY_FIELD_NUMBER, WireFormat.WIRETYPE_LENGTH_DELIMITED);
cos.writeUInt32NoTag(size.intValue());
cos.flush();
streams.addFirst(new DrainableByteArrayInputStream(baos.peekBuffer(), 0, baos.size()));
return new ConsecutiveDrainableStreams(streams.toArray(new InputStream[0]));
} catch (final IOException ex) {
throw new UncheckedDeephavenException("Unexpected IOException", ex);
}
}
use of com.google.flatbuffers.FlatBufferBuilder in project deephaven-core by deephaven.
the class BarrageUtil method schemaBytesFromTable.
/**
 * Serializes a table definition as an Arrow IPC Schema message.
 * Flight expects the Schema wrapped in a Message, prefixed by a 4-byte
 * identifier (used to detect end-of-stream in some cases) followed by the
 * size of the flatbuffer message.
 */
public static ByteString schemaBytesFromTable(final TableDefinition table, final Map<String, Object> attributes) {
    final FlatBufferBuilder fbb = new FlatBufferBuilder();
    final int schemaOffset = BarrageUtil.makeSchemaPayload(fbb, table, attributes);
    final int messageOffset = MessageHelper.wrapInMessage(fbb, schemaOffset, org.apache.arrow.flatbuf.MessageHeader.Schema);
    fbb.finish(messageOffset);
    return ByteStringAccess.wrap(MessageHelper.toIpcBytes(fbb));
}
Use of com.google.flatbuffers.FlatBufferBuilder in the project deephaven-core by deephaven: the class DoExchange, method execute.
@Override
protected void execute(FlightSession flight) throws Exception {
    // The server identifies Barrage traffic by the MAGIC bytes supplied as
    // FlightDescriptor.cmd in the initial message: 0x6E687064, ASCII "dphn".
    byte[] cmd = new byte[] { 100, 112, 104, 110 };
    FlightDescriptor descriptor = FlightDescriptor.command(cmd);
    // open the bi-directional reader/writer
    try (FlightClient.ExchangeReaderWriter erw = flight.startExchange(descriptor);
            final RootAllocator allocator = new RootAllocator(Integer.MAX_VALUE)) {
        // ///////////////////////////////////////////////////////////
        // create a BarrageSnapshotRequest for ticket 's/timetable'
        // ///////////////////////////////////////////////////////////

        // inner metadata describing the snapshot request
        final FlatBufferBuilder requestBuilder = new FlatBufferBuilder();
        final int optOffset = BarrageSnapshotOptions.createBarrageSnapshotOptions(requestBuilder, ColumnConversionMode.Stringify, false, 1000);
        final int ticOffset = BarrageSnapshotRequest.createTicketVector(requestBuilder, ScopeTicketHelper.nameToBytes(ticket.scopeField.variable));
        BarrageSnapshotRequest.startBarrageSnapshotRequest(requestBuilder);
        BarrageSnapshotRequest.addColumns(requestBuilder, 0);
        BarrageSnapshotRequest.addViewport(requestBuilder, 0);
        BarrageSnapshotRequest.addSnapshotOptions(requestBuilder, optOffset);
        BarrageSnapshotRequest.addTicket(requestBuilder, ticOffset);
        requestBuilder.finish(BarrageSnapshotRequest.endBarrageSnapshotRequest(requestBuilder));

        // outer wrapper identifying the message type and carrying the MAGIC bytes
        final FlatBufferBuilder wrapperBuilder = new FlatBufferBuilder();
        final int innerOffset = wrapperBuilder.createByteVector(requestBuilder.dataBuffer());
        // 0x6E687064 is the numerical representation of the ASCII "dphn"
        wrapperBuilder.finish(BarrageMessageWrapper.createBarrageMessageWrapper(wrapperBuilder, 0x6E687064, BarrageMessageType.BarrageSnapshotRequest, innerOffset));

        // extract the bytes and package them in an ArrowBuf for transmission
        cmd = wrapperBuilder.sizedByteArray();
        ArrowBuf data = allocator.buffer(cmd.length);
        data.writeBytes(cmd);

        // `putMetadata()` makes the GRPC call
        erw.getWriter().putMetadata(data);
        // snapshot requests do not need to stay open on the client side
        erw.getWriter().completed();

        // drain everything the server sends back
        while (erw.getReader().next()) {
            // NOP
        }

        // print the table data
        System.out.println(erw.getReader().getSchema().toString());
        System.out.println(erw.getReader().getRoot().contentToTSVString());
    }
}
Aggregations