Use of io.netty.util.concurrent.PromiseCombiner in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutput, method flush0.
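
In all of the examples on this page, PromiseCombiner follows the same shape: register child futures with add or addAll, then call finish(aggregatePromise); the aggregate succeeds once every registered future succeeds and otherwise fails with the first failure cause observed. In flush0 below, this lets one caller-visible future track a flush that may be split across several packets.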
private void flush0(CompletableFuture<Long> future, boolean syncBlock) {
  if (state != State.STREAMING) {
    future.completeExceptionally(new IOException("stream already broken"));
    return;
  }
  int dataLen = buf.readableBytes();
  if (encryptor != null) {
    ByteBuf encryptBuf = alloc.directBuffer(dataLen);
    try {
      encryptor.encrypt(buf.nioBuffer(buf.readerIndex(), dataLen),
        encryptBuf.nioBuffer(0, dataLen));
    } catch (IOException e) {
      encryptBuf.release();
      future.completeExceptionally(e);
      return;
    }
    encryptBuf.writerIndex(dataLen);
    buf.release();
    buf = encryptBuf;
  }
  long lengthAfterFlush = nextPacketOffsetInBlock + dataLen;
  if (lengthAfterFlush == locatedBlock.getBlock().getNumBytes()) {
    // no new data, just return
    future.complete(locatedBlock.getBlock().getNumBytes());
    return;
  }
  Callback c = waitingAckQueue.peekLast();
  if (c != null && lengthAfterFlush == c.ackedLength) {
    // just append it to the tail of the waiting ack queue, do not issue a new hflush request
    waitingAckQueue.addLast(new Callback(eventLoop.<Void>newPromise().addListener(f -> {
      if (f.isSuccess()) {
        future.complete(lengthAfterFlush);
      } else {
        future.completeExceptionally(f.cause());
      }
    }), lengthAfterFlush, Collections.<Channel>emptyList()));
    return;
  }
  Promise<Void> promise;
  if (dataLen > maxDataLen) {
    // We need to write out the data by multiple packets as the max packet allowed is 16M.
    PromiseCombiner combiner = new PromiseCombiner();
    long nextSubPacketOffsetInBlock = nextPacketOffsetInBlock;
    for (int remaining = dataLen; remaining > 0;) {
      int toWriteDataLen = Math.min(remaining, maxDataLen);
      combiner.add(flushBuffer(buf.readRetainedSlice(toWriteDataLen),
        nextSubPacketOffsetInBlock, syncBlock));
      nextSubPacketOffsetInBlock += toWriteDataLen;
      remaining -= toWriteDataLen;
    }
    promise = eventLoop.newPromise();
    combiner.finish(promise);
  } else {
    promise = flushBuffer(buf.retain(), nextPacketOffsetInBlock, syncBlock);
  }
  promise.addListener(f -> {
    if (f.isSuccess()) {
      future.complete(lengthAfterFlush);
    } else {
      future.completeExceptionally(f.cause());
    }
  });
  int trailingPartialChunkLen = dataLen % summer.getBytesPerChecksum();
  ByteBuf newBuf = alloc.directBuffer(guess(dataLen)).ensureWritable(trailingPartialChunkLen);
  if (trailingPartialChunkLen != 0) {
    buf.readerIndex(dataLen - trailingPartialChunkLen).readBytes(newBuf, trailingPartialChunkLen);
  }
  buf.release();
  this.buf = newBuf;
  nextPacketOffsetInBlock += dataLen - trailingPartialChunkLen;
}
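
In the multi-packet branch above, each per-packet flushBuffer call returns its own Promise, and PromiseCombiner folds them into the single promise that completes the caller's future. A minimal sketch of that aggregation in isolation follows; the executor, promise names, and printed messages are illustrative and not part of the HBase code, and it uses the PromiseCombiner(EventExecutor) constructor that newer Netty releases provide in place of the deprecated no-argument one (all combiner calls must then happen on that executor's thread).

import io.netty.util.concurrent.DefaultEventExecutor;
import io.netty.util.concurrent.EventExecutor;
import io.netty.util.concurrent.Promise;
import io.netty.util.concurrent.PromiseCombiner;

public final class PromiseCombinerSketch {
  public static void main(String[] args) {
    EventExecutor executor = new DefaultEventExecutor();
    executor.execute(() -> {
      // All combiner calls are made on the executor's thread, as required.
      PromiseCombiner combiner = new PromiseCombiner(executor);
      Promise<Void> first = executor.newPromise();
      Promise<Void> second = executor.newPromise();
      combiner.add(first);
      combiner.add(second);
      // finish() may be called only once; the aggregate completes after every
      // added promise has completed.
      Promise<Void> aggregate = executor.newPromise();
      combiner.finish(aggregate);
      aggregate.addListener(f ->
        System.out.println(f.isSuccess() ? "all flushes acked" : "flush failed: " + f.cause()));
      first.setSuccess(null);
      second.setSuccess(null);
    });
    executor.shutdownGracefully();
  }
}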
Use of io.netty.util.concurrent.PromiseCombiner in project rest.li by linkedin.
The class Http2StreamCodec, method write.
@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
  if (!(msg instanceof RequestWithCallback)) {
    ctx.write(msg, promise);
    return;
  }
  Request request = ((RequestWithCallback) msg).request();
  Http2ConnectionEncoder encoder = encoder();
  int streamId = connection().local().incrementAndGetNextStreamId();
  if (request instanceof StreamRequest) {
    LOG.debug("Writing StreamRequest...");
    StreamRequest streamRequest = (StreamRequest) request;
    Http2Headers http2Headers = NettyRequestAdapter.toHttp2Headers(streamRequest);
    BufferedReader reader = new BufferedReader(ctx, encoder, streamId, ((RequestWithCallback) msg).handle());
    streamRequest.getEntityStream().setReader(reader);
    encoder.writeHeaders(ctx, streamId, http2Headers, NO_PADDING, NOT_END_STREAM, promise)
        .addListener(future -> reader.request());
    LOG.debug("Sent HTTP/2 HEADERS frame, stream={}, end={}, headers={}, padding={}bytes",
        new Object[] { streamId, NOT_END_STREAM, http2Headers.size(), NO_PADDING });
  } else if (request instanceof RestRequest) {
    LOG.debug("Writing RestRequest...");
    PromiseCombiner promiseCombiner = new PromiseCombiner();
    ChannelPromise headersPromise = ctx.channel().newPromise();
    ChannelPromise dataPromise = ctx.channel().newPromise();
    promiseCombiner.add(headersPromise);
    promiseCombiner.add(dataPromise);
    promiseCombiner.finish(promise);
    RestRequest restRequest = (RestRequest) request;
    Http2Headers headers = NettyRequestAdapter.toHttp2Headers(restRequest);
    encoder.writeHeaders(ctx, streamId, headers, NO_PADDING, NOT_END_STREAM, headersPromise);
    LOG.debug("Sent HTTP/2 HEADERS frame, stream={}, end={}, headers={}, padding={}bytes",
        new Object[] { streamId, NOT_END_STREAM, headers.size(), NO_PADDING });
    ByteBuf data = Unpooled.wrappedBuffer(restRequest.getEntity().asByteBuffer());
    encoder.writeData(ctx, streamId, data, NO_PADDING, END_STREAM, dataPromise);
    LOG.debug("Sent HTTP/2 DATA frame, stream={}, end={}, data={}bytes, padding={}bytes",
        new Object[] { streamId, END_STREAM, data.readableBytes(), NO_PADDING });
  } else {
    // Request type is not supported. Return the channel to the pool and throw.
    ctx.fireChannelRead(((RequestWithCallback) msg).handle());
    throw new IllegalArgumentException("Request is neither StreamRequest nor RestRequest");
  }
  // Sets TransportCallback as a stream property to be retrieved later
  TransportCallback<?> callback = ((RequestWithCallback) msg).callback();
  Http2Connection.PropertyKey callbackKey =
      ctx.channel().attr(Http2ClientPipelineInitializer.CALLBACK_ATTR_KEY).get();
  connection().stream(streamId).setProperty(callbackKey, callback);
  // Sets AsyncPoolHandle as a stream property to be retrieved later
  AsyncPoolHandle<?> handle = ((RequestWithCallback) msg).handle();
  Http2Connection.PropertyKey handleKey =
      ctx.channel().attr(Http2ClientPipelineInitializer.CHANNEL_POOL_HANDLE_ATTR_KEY).get();
  connection().stream(streamId).setProperty(handleKey, handle);
}
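
Here the combiner ties the HEADERS and DATA writes to the one promise the caller passed in: the aggregate succeeds only if both child writes succeed, and otherwise fails with the first recorded cause. A minimal sketch of that failure propagation, using an EmbeddedChannel purely for illustration (the class name and the simulated IOException are invented for the example):

import java.io.IOException;

import io.netty.channel.ChannelPromise;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.util.concurrent.PromiseCombiner;

public final class HeadersAndDataAggregation {
  public static void main(String[] args) {
    EmbeddedChannel ch = new EmbeddedChannel();
    // EmbeddedChannel's event loop treats the calling thread as the event-loop
    // thread, so the combiner can be driven directly from here.
    PromiseCombiner combiner = new PromiseCombiner(ch.eventLoop());
    ChannelPromise headersPromise = ch.newPromise();
    ChannelPromise dataPromise = ch.newPromise();
    combiner.add(headersPromise);
    combiner.add(dataPromise);
    ChannelPromise aggregate = ch.newPromise();
    combiner.finish(aggregate);
    headersPromise.setSuccess();
    dataPromise.setFailure(new IOException("simulated DATA write failure"));
    // finish() has been called and both child promises are done, so the
    // aggregate has failed with the first recorded cause.
    System.out.println("aggregate failed: " + aggregate.cause());
    ch.finishAndReleaseAll();
  }
}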
Use of io.netty.util.concurrent.PromiseCombiner in project netty by netty.
The class CompressorHttp2ConnectionEncoder, method writeData.
@Override
public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data,
        int padding, final boolean endOfStream, ChannelPromise promise) {
    final Http2Stream stream = connection().stream(streamId);
    final EmbeddedChannel channel = stream == null ? null : (EmbeddedChannel) stream.getProperty(propertyKey);
    if (channel == null) {
        // The compressor may be null if no compatible encoding type was found in this stream's headers
        return super.writeData(ctx, streamId, data, padding, endOfStream, promise);
    }
    try {
        // The channel will release the buffer after being written
        channel.writeOutbound(data);
        ByteBuf buf = nextReadableBuf(channel);
        if (buf == null) {
            if (endOfStream) {
                if (channel.finish()) {
                    buf = nextReadableBuf(channel);
                }
                return super.writeData(ctx, streamId, buf == null ? Unpooled.EMPTY_BUFFER : buf,
                        padding, true, promise);
            }
            // END_STREAM is not set and the assumption is data is still forthcoming.
            promise.setSuccess();
            return promise;
        }
        PromiseCombiner combiner = new PromiseCombiner();
        for (;;) {
            ByteBuf nextBuf = nextReadableBuf(channel);
            boolean compressedEndOfStream = nextBuf == null && endOfStream;
            if (compressedEndOfStream && channel.finish()) {
                nextBuf = nextReadableBuf(channel);
                compressedEndOfStream = nextBuf == null;
            }
            ChannelPromise bufPromise = ctx.newPromise();
            combiner.add(bufPromise);
            super.writeData(ctx, streamId, buf, padding, compressedEndOfStream, bufPromise);
            if (nextBuf == null) {
                break;
            }
            // Padding is only communicated once on the first iteration
            padding = 0;
            buf = nextBuf;
        }
        combiner.finish(promise);
    } catch (Throwable cause) {
        promise.tryFailure(cause);
    } finally {
        if (endOfStream) {
            cleanup(stream, channel);
        }
    }
    return promise;
}
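
Unlike the fixed two-promise case above, this encoder registers a variable number of per-frame promises in a loop before calling finish, which is the general shape for chunked writes. A hypothetical helper showing that loop with the newer executor-taking constructor (ChunkedWriteHelper and writeChunked are illustrative names, not Netty API):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.util.concurrent.PromiseCombiner;

final class ChunkedWriteHelper {
  // Assumes it is invoked on the channel's event-loop thread, as a handler's
  // write(...) normally is.
  static void writeChunked(ChannelHandlerContext ctx, Object[] chunks, ChannelPromise promise) {
    // Newer Netty deprecates the no-arg constructor used in the encoder above;
    // the replacement takes the executor whose thread drives all combiner calls.
    PromiseCombiner combiner = new PromiseCombiner(ctx.executor());
    for (Object chunk : chunks) {
      // Each write gets its own future; all of them fold into the caller's promise.
      combiner.add(ctx.write(chunk));
    }
    combiner.finish(promise);
    ctx.flush();
  }
}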