Example use of io.netty.buffer.ByteBufOutputStream in the openzipkin/zipkin project:
class BulkCallBuilder, method serialize.
/**
 * Serializes one index entry as the two newline-terminated lines of an
 * Elasticsearch bulk request: the index metadata line followed by the
 * document line.
 *
 * <p>Scratch buffers used while building the JSON are always released in the
 * finally block. The returned payload is released by this method only if
 * copying into it fails; otherwise ownership passes to the caller.
 */
static <T> ByteBuf serialize(ByteBufAllocator alloc, IndexEntry<T> entry, boolean shouldAddType) {
  // Fuzzily assume a general small span is 600 bytes to reduce resizing while building up the
  // JSON. Any extra bytes will be released back after serializing the document.
  ByteBuf docBuf = alloc.heapBuffer(600);
  ByteBuf metaBuf = alloc.heapBuffer(200);
  try {
    String id = entry.writer().writeDocument(entry.input(), new ByteBufOutputStream(docBuf));
    writeIndexMetadata(new ByteBufOutputStream(metaBuf), entry, id, shouldAddType);

    // +2 accounts for the newline terminating each of the two lines.
    int payloadSize = metaBuf.readableBytes() + docBuf.readableBytes() + 2;
    ByteBuf payload = alloc.ioBuffer(payloadSize);
    try {
      payload.writeBytes(metaBuf).writeByte('\n').writeBytes(docBuf).writeByte('\n');
    } catch (Throwable t) {
      payload.release(); // nothing downstream will release it on this path
      propagateIfFatal(t);
      Exceptions.throwUnsafely(t);
    }
    return payload;
  } finally {
    docBuf.release();
    metaBuf.release();
  }
}
Example use of io.netty.buffer.ByteBufOutputStream in the openzipkin/zipkin project:
class ZipkinQueryApiV2, method maybeCacheNames.
/**
 * We cache names if there are more than 3 names. This helps people getting started: if we cache
 * empty results, users have more questions. We assume caching becomes a concern when zipkin is in
 * active use, and active use usually implies more than 3 services.
 */
AggregatedHttpResponse maybeCacheNames(
    boolean shouldCacheControl, List<String> values, ByteBufAllocator alloc) {
  Collections.sort(values);

  // Exact serialized size when no value needs JSON escaping: two brackets,
  // each value quoted... wait, plus one separator per element except the
  // last. Starting at 1 folds the "two brackets minus the missing trailing
  // comma" into a single constant. If escaping is needed, the buffer resizes.
  int sizeEstimate = 1;
  for (String value : values) {
    sizeEstimate += value.length() + 1;
  }

  ByteBuf buf = alloc.buffer(sizeEstimate);
  try (JsonGenerator gen =
      JsonUtil.JSON_FACTORY.createGenerator((OutputStream) new ByteBufOutputStream(buf))) {
    gen.writeStartArray(values.size());
    for (String value : values) {
      gen.writeString(value);
    }
    gen.writeEndArray();
  } catch (IOException e) {
    buf.release(); // the response never takes ownership on this path
    throw new UncheckedIOException(e);
  }

  ResponseHeadersBuilder headers =
      ResponseHeaders.builder(200)
          .contentType(MediaType.JSON)
          .setInt(HttpHeaderNames.CONTENT_LENGTH, buf.readableBytes());
  if (shouldCacheControl) {
    headers = headers.add(CACHE_CONTROL, "max-age=" + namesMaxAge + ", must-revalidate");
  }
  // HttpData.wrap takes ownership of buf from here on.
  return AggregatedHttpResponse.of(headers.build(), HttpData.wrap(buf));
}
Example use of io.netty.buffer.ByteBufOutputStream in the Railcraft project:
class PacketBuilder, method sendGuiWidgetPacket.
/**
 * Sends a widget's server-side sync data to the given listener, provided the
 * listener is a remote player and the widget reports data to sync for it.
 */
public void sendGuiWidgetPacket(IContainerListener listener, int windowId, Widget widget) {
  // Guard clause: only remote players with pending sync data get a packet.
  if (!(listener instanceof EntityPlayerMP) || !widget.hasServerSyncData(listener)) {
    return;
  }
  ByteBuf buffer = Unpooled.buffer();
  try (ByteBufOutputStream byteStream = new ByteBufOutputStream(buffer);
       RailcraftOutputStream syncStream = new RailcraftOutputStream(byteStream)) {
    widget.writeServerSyncData(listener, syncStream);
    // Copy the serialized bytes out so the buffer can be released below.
    byte[] payload = new byte[buffer.readableBytes()];
    buffer.readBytes(payload);
    PacketDispatcher.sendToPlayer(
        new PacketGuiWidget(windowId, widget, payload), (EntityPlayerMP) listener);
  } catch (IOException ex) {
    // Swallowed outside development builds: widget sync is best-effort there.
    if (Game.DEVELOPMENT_VERSION) {
      throw new RuntimeException(ex);
    }
  } finally {
    buffer.release();
  }
}
Example use of io.netty.buffer.ByteBufOutputStream in the Apache Drill project:
class RpcEncoder, method encode.
/**
 * Encodes an OutboundRpcMessage into a length-delimited frame: a raw varint32
 * full-frame length, then the tagged header, the tagged protobuf body, and —
 * when present — a tagged raw body whose backing buffers are appended to a
 * composite buffer rather than copied.
 */
@Override
protected void encode(ChannelHandlerContext ctx, OutboundRpcMessage msg, List<Object> out) throws Exception {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Rpc Encoder called with msg {}", msg);
}
// A closed channel can't be written to: drop the frame and release the
// message's underlying buffers so they don't leak.
if (!ctx.channel().isOpen()) {
// output.add(ctx.alloc().buffer(0));
logger.debug("Channel closed, skipping encode.");
msg.release();
return;
}
try {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Encoding outbound message {}", msg);
}
// first we build the RpcHeader
RpcHeader header =
RpcHeader.newBuilder().setMode(
msg.mode).setCoordinationId(
msg.coordinationId).setRpcType(msg.rpcType).build();
// figure out the full length: each section is a tag, a varint length, then
// the section's bytes.
int headerLength = header.getSerializedSize();
int protoBodyLength = msg.pBody.getSerializedSize();
int rawBodyLength = msg.getRawBodySize();
int fullLength =
HEADER_TAG_LENGTH + getRawVarintSize(headerLength) + headerLength + PROTOBUF_BODY_TAG_LENGTH + getRawVarintSize(protoBodyLength) +
protoBodyLength;
if (rawBodyLength > 0) {
fullLength += (RAW_BODY_TAG_LENGTH + getRawVarintSize(rawBodyLength) + rawBodyLength);
}
// NOTE(review): if anything below throws before buf reaches `out`, buf is
// not released here — looks like a potential leak; confirm upstream handling.
ByteBuf buf = ctx.alloc().buffer();
OutputStream os = new ByteBufOutputStream(buf);
CodedOutputStream cos = CodedOutputStream.newInstance(os);
// write full length first (this is length delimited stream).
cos.writeRawVarint32(fullLength);
// write header
cos.writeRawVarint32(HEADER_TAG);
cos.writeRawVarint32(headerLength);
header.writeTo(cos);
// write protobuf body length and body
cos.writeRawVarint32(PROTOBUF_BODY_TAG);
cos.writeRawVarint32(protoBodyLength);
msg.pBody.writeTo(cos);
// if exists, write data body and tag.
if (msg.getRawBodySize() > 0) {
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Writing raw body of size {}", msg.getRawBodySize());
}
cos.writeRawVarint32(RAW_BODY_TAG);
cos.writeRawVarint32(rawBodyLength);
// need to flush so that dbody goes after if cos is caching.
cos.flush();
final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(msg.dBodies.length + 1);
// addComponent does not advance the composite's writer index; it is set
// explicitly to the accumulated length below.
cbb.addComponent(buf);
int bufLength = buf.readableBytes();
for (ByteBuf b : msg.dBodies) {
cbb.addComponent(b);
bufLength += b.readableBytes();
}
cbb.writerIndex(bufLength);
out.add(cbb);
} else {
cos.flush();
out.add(buf);
}
if (RpcConstants.SOME_DEBUGGING) {
logger.debug("Wrote message length {}:{} bytes (head:body). Message: " + msg, getRawVarintSize(fullLength), fullLength);
}
if (RpcConstants.EXTRA_DEBUGGING) {
logger.debug("Sent message. Ending writer index was {}.", buf.writerIndex());
}
} finally {
// make sure to release Rpc Messages underlying byte buffers.
// NOTE(review): msg.release() is deliberately commented out — presumably
// ownership of the dBodies transfers to the composite buffer added to
// `out`; confirm before re-enabling.
// msg.release();
}
}
Aggregations