Use of org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf in project netty by netty.
The class Http2CodecTest, method createOutboundStream.
@Test
public void createOutboundStream() {
    Http2StreamChannelBootstrap b = new Http2StreamChannelBootstrap();
    Channel childChannel = b.parentChannel(clientChannel)
                            .handler(new TestChannelInitializer())
                            .connect().syncUninterruptibly().channel();
    assertTrue(childChannel.isRegistered());
    assertTrue(childChannel.isActive());

    // Write a HEADERS frame followed by an end-of-stream DATA frame on the child channel.
    Http2Headers headers = new DefaultHttp2Headers();
    childChannel.write(new DefaultHttp2HeadersFrame(headers));
    ByteBuf data = Unpooled.buffer(100).writeZero(100);
    childChannel.writeAndFlush(new DefaultHttp2DataFrame(data, true));

    // The server should observe both frames on stream 3.
    Http2HeadersFrame headersFrame = serverLastInboundHandler.blockingReadInbound();
    assertNotNull(headersFrame);
    assertEquals(3, headersFrame.streamId());
    assertEquals(headers, headersFrame.headers());

    Http2DataFrame dataFrame = serverLastInboundHandler.blockingReadInbound();
    assertNotNull(dataFrame);
    assertEquals(3, dataFrame.streamId());
    assertEquals(data.resetReaderIndex(), dataFrame.content());
    assertTrue(dataFrame.isEndStream());
    dataFrame.release();

    // Closing the child channel resets the stream.
    childChannel.close();
    Http2ResetFrame rstFrame = serverLastInboundHandler.blockingReadInbound();
    assertNotNull(rstFrame);
    assertEquals(3, rstFrame.streamId());
}
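The TestChannelInitializer used above is a test fixture whose body is not shown in this snippet. A minimal stand-in, assuming it only needs to satisfy the handler(...) call, could look like this (the empty body is our assumption, not the project's code):

    import io.netty.channel.Channel;
    import io.netty.channel.ChannelInitializer;

    // Hypothetical stand-in for the TestChannelInitializer fixture.
    // It installs no handlers, so the test drives the child stream
    // channel directly with HTTP/2 frames.
    final class TestChannelInitializer extends ChannelInitializer<Channel> {
        @Override
        protected void initChannel(Channel ch) {
            // Intentionally empty: the test writes frames itself.
        }
    }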
Use of org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf in project netty by netty.
The class HpackDecoderTest, method testDecodeLargerThanMaxHeaderListSizeButSmallerThanMaxHeaderListSizeUpdatesDynamicTable.
@Test
public void testDecodeLargerThanMaxHeaderListSizeButSmallerThanMaxHeaderListSizeUpdatesDynamicTable() throws Http2Exception {
    ByteBuf in = Unpooled.buffer(200);
    try {
        hpackDecoder.setMaxHeaderListSize(100, 200);
        HpackEncoder hpackEncoder = new HpackEncoder(true);

        // Encode headers that are slightly larger than maxHeaderListSize
        // but smaller than maxHeaderListSizeGoAway.
        Http2Headers toEncode = new DefaultHttp2Headers();
        toEncode.add("test_1", "1");
        toEncode.add("test_2", "2");
        toEncode.add("long", String.format("%0100d", 0).replace('0', 'A'));
        toEncode.add("test_3", "3");
        hpackEncoder.encodeHeaders(1, in, toEncode, NEVER_SENSITIVE);

        // Decoding should throw, but the decoded headers object should
        // still contain all of the headers.
        Http2Headers decoded = new DefaultHttp2Headers();
        try {
            hpackDecoder.decode(1, in, decoded);
            fail();
        } catch (Http2Exception e) {
            assertTrue(e instanceof Http2Exception.HeaderListSizeException);
        }
        assertEquals(4, decoded.size());
        assertTrue(decoded.contains("test_3"));
    } finally {
        in.release();
    }
}
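For context, the header-list size that setMaxHeaderListSize(100, 200) bounds follows the SETTINGS_MAX_HEADER_LIST_SIZE accounting from RFC 7540 §6.5.2: each entry contributes its name length plus its value length plus 32 octets of overhead. A sketch of that accounting (the helper name headerListSize is ours, and it assumes ASCII names and values so that length() equals the octet count):

    import java.util.Map;

    // RFC 7540 header-list size: name octets + value octets + 32 per entry.
    static long headerListSize(Http2Headers headers) {
        long size = 0;
        for (Map.Entry<CharSequence, CharSequence> entry : headers) {
            size += entry.getKey().length() + entry.getValue().length() + 32;
        }
        return size;
    }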
Use of org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf in project netty by netty.
The class PemPrivateKey, method toPEM.
/**
 * Creates a {@link PemEncoded} value from the {@link PrivateKey}.
 */
static PemEncoded toPEM(ByteBufAllocator allocator, boolean useDirect, PrivateKey key) {
    // If the key is already PEM encoded, we can pass it straight
    // into OpenSSL without having to do any of the extra work.
    if (key instanceof PemEncoded) {
        return ((PemEncoded) key).retain();
    }
    ByteBuf encoded = Unpooled.wrappedBuffer(key.getEncoded());
    try {
        ByteBuf base64 = SslUtils.toBase64(allocator, encoded);
        try {
            int size = BEGIN_PRIVATE_KEY.length + base64.readableBytes() + END_PRIVATE_KEY.length;
            boolean success = false;
            final ByteBuf pem = useDirect ? allocator.directBuffer(size) : allocator.buffer(size);
            try {
                pem.writeBytes(BEGIN_PRIVATE_KEY);
                pem.writeBytes(base64);
                pem.writeBytes(END_PRIVATE_KEY);
                PemValue value = new PemValue(pem, true);
                success = true;
                return value;
            } finally {
                // Make sure we never leak the PEM ByteBuf if there's an Exception.
                if (!success) {
                    SslUtils.zerooutAndRelease(pem);
                }
            }
        } finally {
            SslUtils.zerooutAndRelease(base64);
        }
    } finally {
        SslUtils.zerooutAndRelease(encoded);
    }
}
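SslUtils.toBase64 and SslUtils.zerooutAndRelease are netty-internal helpers. The key-hygiene idea behind the latter can be sketched as follows; this is a sketch of the pattern, not netty's actual implementation, and it assumes the buffer's contents are safe to overwrite:

    import io.netty.buffer.ByteBuf;

    // Overwrite the buffer before releasing it so private-key material
    // does not linger in (possibly pooled) memory.
    static void zeroOutAndRelease(ByteBuf buffer) {
        if (!buffer.isReadOnly()) {
            buffer.setZero(0, buffer.capacity());
        }
        buffer.release();
    }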
Use of org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf in project neo4j by neo4j.
The class ReplicatedTransactionFactory, method createImmutableReplicatedTransaction.
public static ReplicatedTransaction createImmutableReplicatedTransaction(TransactionRepresentation tx) {
    ByteBuf transactionBuffer = Unpooled.buffer();
    NetworkFlushableChannelNetty4 channel = new NetworkFlushableChannelNetty4(transactionBuffer, MAX_SERIALIZED_TX_SIZE);
    try {
        TransactionSerializer.write(tx, channel);
    } catch (MessageTooBigException e) {
        throw new IllegalStateException("Transaction size was too large to replicate across the cluster.", e);
    } catch (IOException e) {
        // An IOException should not happen when writing to an in-memory buffer.
        // Easier said than done though: we use the LogEntry handling routines, which throw IOException.
        throw new RuntimeException(e);
    }
    /*
     * Trim the array down to the index that was actually written. While sending additional zeroes
     * is safe, since LogEntryReader stops reading once it sees a zero entry, it is wasteful.
     */
    byte[] txBytes = Arrays.copyOf(transactionBuffer.array(), transactionBuffer.writerIndex());
    transactionBuffer.release();
    return new ReplicatedTransaction(txBytes);
}
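The trim matters because Unpooled.buffer() over-allocates its backing array; copying up to writerIndex keeps only the bytes actually written. A minimal illustration of just that step (assuming a heap buffer whose arrayOffset() is zero, which Unpooled.buffer() provides):

    import java.util.Arrays;
    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    // The backing array is larger than what was written, so copy only
    // the [0, writerIndex) prefix.
    ByteBuf buffer = Unpooled.buffer();
    buffer.writeBytes(new byte[] { 1, 2, 3 });
    byte[] exact = Arrays.copyOf(buffer.array(), buffer.writerIndex()); // {1, 2, 3}
    buffer.release();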
Use of org.apache.flink.shaded.netty4.io.netty.buffer.ByteBuf in project neo4j by neo4j.
The class ReplicatedTokenRequestSerializer, method commandBytes.
public static byte[] commandBytes(Collection<StorageCommand> commands) {
    ByteBuf commandBuffer = Unpooled.buffer();
    NetworkFlushableChannelNetty4 channel = new NetworkFlushableChannelNetty4(commandBuffer);
    try {
        new LogEntryWriter(channel).serialize(commands);
    } catch (IOException e) {
        // TODO: Handle or throw.
        e.printStackTrace();
    }
    byte[] commandsBytes = commandBuffer.array().clone();
    commandBuffer.release();
    return commandsBytes;
}
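Note that clone() copies the ByteBuf's entire backing array, including unwritten trailing capacity, whereas createImmutableReplicatedTransaction above trims to writerIndex. If the trailing zeroes were unwanted here too, the same trim would apply (a suggested alternative, not the project's code):

    // Copies only the bytes actually written, mirroring the
    // transaction factory above.
    byte[] commandsBytes = Arrays.copyOf(commandBuffer.array(), commandBuffer.writerIndex());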