Use of org.jboss.netty.buffer.ChannelBuffer in the project graphdb (neo4j-attic): class Client, method sendRequest.
/**
 * Sends a request of the given type to the server and reads back its response.
 * <p>
 * The request is written as a chunked stream (type id, context, then the
 * serializer's payload) over a pooled channel, and the response is read back
 * through a dechunking buffer. If the channel turns out to be dead it is
 * disposed from the pool.
 *
 * @param type         the kind of request being sent; its id is the first byte on the wire
 * @param context      slave context written after the type id
 * @param serializer   writes the request payload
 * @param deserializer reads the typed response payload
 * @return the deserialized response together with the store id and any transaction streams
 * @throws ComException wrapping any communication failure (closed channel, I/O error, interruption)
 */
protected <R> Response<R> sendRequest(RequestType<M> type, SlaveContext context, Serializer serializer, Deserializer<R> deserializer) {
    Triplet<Channel, ChannelBuffer, ByteBuffer> channelContext = null;
    try {
        // Send 'em over the wire
        channelContext = getChannel();
        Channel channel = channelContext.first();
        channelContext.second().clear();
        ChunkingChannelBuffer chunkingBuffer = new ChunkingChannelBuffer(channelContext.second(), channel, Protocol.MAX_FRAME_LENGTH);
        chunkingBuffer.writeByte(type.id());
        writeContext(type, context, chunkingBuffer);
        serializer.write(chunkingBuffer, channelContext.third());
        chunkingBuffer.done();

        // Read the response
        @SuppressWarnings("unchecked")
        BlockingReadHandler<ChannelBuffer> reader = (BlockingReadHandler<ChannelBuffer>) channel.getPipeline().get("blockingHandler");
        final Triplet<Channel, ChannelBuffer, ByteBuffer> finalChannelContext = channelContext;
        DechunkingChannelBuffer dechunkingBuffer = new DechunkingChannelBuffer(reader, DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS) {
            @Override
            protected ChannelBuffer readNext() {
                ChannelBuffer result = super.readNext();
                if (result == null) {
                    // A null read means the other side hung up: this pooled
                    // channel is unusable, so dispose of it before failing.
                    channelPool.dispose(finalChannelContext);
                    throw new ComException("Channel has been closed");
                }
                return result;
            }
        };
        R response = deserializer.read(dechunkingBuffer, channelContext.third());
        StoreId storeId = readStoreId(dechunkingBuffer, channelContext.third());
        if (shouldCheckStoreId(type)) {
            assertCorrectStoreId(storeId);
        }
        TransactionStream txStreams = readTransactionStreams(dechunkingBuffer);
        return new Response<R>(response, storeId, txStreams);
    } catch (ClosedChannelException e) {
        // The channel is dead — remove it from the pool before wrapping.
        channelPool.dispose(channelContext);
        throw new ComException(e);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers further up the stack can
        // still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
        throw new ComException(e);
    } catch (Exception e) {
        // IOException and anything else unexpected is wrapped uniformly.
        throw new ComException(e);
    } finally {
        releaseChannel();
    }
}
Use of org.jboss.netty.buffer.ChannelBuffer in the project graphdb (neo4j-attic): class DechunkingChannelBuffer, method readNextChunk.
/**
 * Reads the next chunk off the wire and folds it into the accumulated
 * message buffer, updating {@code more} from the chunk's continuation byte.
 */
private void readNextChunk() {
    ChannelBuffer chunk = readNext();
    more = chunk.readByte() == ChunkingChannelBuffer.CONTINUATION_MORE;
    if (buffer == null && !more) {
        // Single-chunk message: adopt the incoming buffer directly instead
        // of copying it into an accumulator.
        buffer = chunk;
    } else {
        if (buffer == null) {
            buffer = ChannelBuffers.dynamicBuffer();
        }
        discardReadBytes();
        buffer.writeBytes(chunk);
    }
}
Use of org.jboss.netty.buffer.ChannelBuffer in the project neo4j: class TestBlockLogBuffer, method canWriteLargestAtomAfterFillingBuffer.
@Test
public void canWriteLargestAtomAfterFillingBuffer() throws Exception {
byte[] bytes = new byte[300];
ChannelBuffer wrappedBuffer = ChannelBuffers.wrappedBuffer(bytes);
wrappedBuffer.resetWriterIndex();
BlockLogBuffer buffer = new BlockLogBuffer(wrappedBuffer, new Monitors().newMonitor(ByteCounterMonitor.class));
byte[] bytesValue = new byte[255];
bytesValue[0] = 1;
bytesValue[254] = -1;
long longValue = 123456;
buffer.put(bytesValue, bytesValue.length);
buffer.putLong(longValue);
buffer.close();
ByteBuffer verificationBuffer = ByteBuffer.wrap(bytes);
assertEquals((byte) 0, verificationBuffer.get());
byte[] actualBytes = new byte[bytesValue.length];
verificationBuffer.get(actualBytes);
assertThat(actualBytes, new ArrayMatches<byte[]>(bytesValue));
assertEquals((byte) 8, verificationBuffer.get());
assertEquals(longValue, verificationBuffer.getLong());
}
Use of org.jboss.netty.buffer.ChannelBuffer in the project neo4j: class TestBlockLogBuffer, method canWriteReallyLargeByteArray.
@Test
public void canWriteReallyLargeByteArray() throws Exception {
byte[] bytes = new byte[650];
ChannelBuffer wrappedBuffer = ChannelBuffers.wrappedBuffer(bytes);
wrappedBuffer.resetWriterIndex();
BlockLogBuffer buffer = new BlockLogBuffer(wrappedBuffer, new Monitors().newMonitor(ByteCounterMonitor.class));
byte[] bytesValue = new byte[600];
bytesValue[1] = 1;
bytesValue[99] = 2;
bytesValue[199] = 3;
bytesValue[299] = 4;
bytesValue[399] = 5;
bytesValue[499] = 6;
bytesValue[599] = 7;
buffer.put(bytesValue, bytesValue.length);
buffer.close();
byte[] actual;
ByteBuffer verificationBuffer = ByteBuffer.wrap(bytes);
assertEquals((byte) 0, verificationBuffer.get());
actual = new byte[255];
verificationBuffer.get(actual);
assertThat(actual, new ArrayMatches<byte[]>(Arrays.copyOfRange(bytesValue, 0, 255)));
assertEquals((byte) 0, verificationBuffer.get());
actual = new byte[255];
verificationBuffer.get(actual);
assertThat(actual, new ArrayMatches<byte[]>(Arrays.copyOfRange(bytesValue, 255, 510)));
assertEquals((byte) 90, verificationBuffer.get());
actual = new byte[90];
verificationBuffer.get(actual);
assertThat(actual, new ArrayMatches<byte[]>(Arrays.copyOfRange(bytesValue, 510, 600)));
}
Use of org.jboss.netty.buffer.ChannelBuffer in the project neo4j: class TestBlockLogBuffer, method readSmallPortions.
@Test
public void readSmallPortions() throws IOException {
    byte[] backing = new byte[255];
    ChannelBuffer target = ChannelBuffers.wrappedBuffer(backing);
    target.resetWriterIndex();
    BlockLogBuffer logBuffer = new BlockLogBuffer(target, new Monitors().newMonitor(ByteCounterMonitor.class));

    // Write three values of different widths into a single block.
    byte expectedByte = 5;
    int expectedInt = 1234;
    long expectedLong = 574853;
    logBuffer.put(expectedByte);
    logBuffer.putInt(expectedInt);
    logBuffer.putLong(expectedLong);
    logBuffer.close();

    // Read them back one value at a time, each through a minimally-sized
    // destination buffer, to exercise small partial reads.
    ReadableByteChannel reader = new BlockLogReader(target);

    ByteBuffer oneByte = ByteBuffer.wrap(new byte[1]);
    reader.read(oneByte);
    oneByte.flip();
    assertEquals(expectedByte, oneByte.get());

    ByteBuffer fourBytes = ByteBuffer.wrap(new byte[4]);
    reader.read(fourBytes);
    fourBytes.flip();
    assertEquals(expectedInt, fourBytes.getInt());

    ByteBuffer eightBytes = ByteBuffer.wrap(new byte[8]);
    reader.read(eightBytes);
    eightBytes.flip();
    assertEquals(expectedLong, eightBytes.getLong());
}
Aggregations