Use of com.hazelcast.internal.networking.HandlerStatus in project hazelcast by hazelcast.
The class NioInboundPipeline, method process().
@Override
void process() throws Exception {
    int readBytes = socketChannel.read(receiveBuffer);
    if (readBytes == -1) {
        throw new EOFException("Remote socket closed!");
    }

    if (readBytes > 0) {
        processCount.inc();
        lastReadTime = currentTimeMillis();
        bytesRead.inc(readBytes);
    }

    // Currently the whole pipeline is retried when one of the handlers is dirty,
    // although only the dirty handler and the handlers after it would need to be retried.
    InboundHandler[] localHandlers = handlers;
    boolean cleanPipeline;
    boolean unregisterRead;
    do {
        cleanPipeline = true;
        unregisterRead = false;
        for (int handlerIndex = 0; handlerIndex < localHandlers.length; handlerIndex++) {
            InboundHandler handler = localHandlers[handlerIndex];

            HandlerStatus handlerStatus = handler.onRead();

            if (localHandlers != handlers) {
                // a change in the pipeline was detected, so the loop is restarted
                handlerIndex = -1;
                localHandlers = handlers;
                continue;
            }

            switch (handlerStatus) {
                case CLEAN:
                    break;
                case DIRTY:
                    cleanPipeline = false;
                    break;
                case BLOCKED:
                    // setting cleanPipeline to true keeps flushing everything downstream, but not upstream
                    cleanPipeline = true;
                    unregisterRead = true;
                    break;
                default:
                    throw new IllegalStateException();
            }
        }
    } while (!cleanPipeline);

    if (migrationRequested()) {
        startMigration();
        return;
    }

    if (unregisterRead) {
        unregisterOp(OP_READ);
    }
}
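To see how the three HandlerStatus values drive this retry loop in isolation, here is a minimal self-contained sketch of the same control flow. The Handler interface and Status enum are hypothetical stand-ins, not the real Hazelcast types: the chain is re-run while any handler reports DIRTY, and a BLOCKED handler asks the caller to stop registering for reads.

import java.util.List;

// Minimal sketch of the inbound retry loop; Status and Handler are hypothetical
// stand-ins for HandlerStatus and InboundHandler.
class InboundLoopSketch {

    enum Status { CLEAN, DIRTY, BLOCKED }

    interface Handler {
        Status onRead();
    }

    static boolean runPipeline(List<Handler> handlers) {
        boolean cleanPipeline;
        boolean unregisterRead;
        do {
            cleanPipeline = true;
            unregisterRead = false;
            for (Handler handler : handlers) {
                switch (handler.onRead()) {
                    case CLEAN:
                        break;                  // handler fully consumed its input
                    case DIRTY:
                        cleanPipeline = false;  // leftover work; retry the whole chain
                        break;
                    case BLOCKED:
                        cleanPipeline = true;   // keep draining downstream, but stop reading upstream
                        unregisterRead = true;
                        break;
                }
            }
        } while (!cleanPipeline);
        return unregisterRead;                  // caller would unregister OP_READ when true
    }
}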
Use of com.hazelcast.internal.networking.HandlerStatus in project hazelcast by hazelcast.
The class NioOutboundPipeline, method process().
// is never called concurrently!
@Override
@SuppressWarnings("unchecked")
public void process() throws Exception {
    processCount.inc();

    OutboundHandler[] localHandlers = handlers;
    HandlerStatus pipelineStatus = CLEAN;
    for (int handlerIndex = 0; handlerIndex < localHandlers.length; handlerIndex++) {
        OutboundHandler handler = localHandlers[handlerIndex];

        HandlerStatus handlerStatus = handler.onWrite();

        if (localHandlers != handlers) {
            // a change in the pipeline was detected, therefore the loop is restarted
            localHandlers = handlers;
            pipelineStatus = CLEAN;
            handlerIndex = -1;
        } else if (handlerStatus != CLEAN) {
            pipelineStatus = handlerStatus;
        }
    }

    flushToSocket();

    if (migrationRequested()) {
        startMigration();
        // so we don't need to worry about write-through
        return;
    }

    if (sendBuffer.remaining() > 0) {
        pipelineStatus = DIRTY;
    }

    switch (pipelineStatus) {
        case CLEAN:
            postProcessClean();
            break;
        case DIRTY:
            postProcessDirty();
            break;
        case BLOCKED:
            postProcessBlocked();
            break;
        default:
            throw new IllegalStateException();
    }
}
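For the outbound side, here is a minimal sketch (again with hypothetical names, not the real OutboundHandler contract) of an encoder that maps "source drained" to CLEAN and "destination buffer ran out of room" to DIRTY, which is the distinction the switch above dispatches on.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Queue;

// Hypothetical encoder sketch: illustrates why a pipeline would map
// "destination buffer ran out of space" to DIRTY and "source drained" to CLEAN.
class OutboundEncoderSketch {

    enum Status { CLEAN, DIRTY }

    private final Queue<byte[]> src = new ArrayDeque<>();
    private byte[] pending;   // payload that did not fit, carried over to the next call

    void offer(byte[] payload) {
        src.add(payload);
    }

    Status onWrite(ByteBuffer dst) {
        for (byte[] payload = next(); payload != null; payload = next()) {
            if (dst.remaining() < payload.length) {
                pending = payload;   // not enough room; keep it for the next round
                return Status.DIRTY;
            }
            dst.put(payload);
        }
        return Status.CLEAN;         // everything from the source has been written
    }

    private byte[] next() {
        if (pending != null) {
            byte[] p = pending;
            pending = null;
            return p;
        }
        return src.poll();
    }
}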
Use of com.hazelcast.internal.networking.HandlerStatus in project hazelcast by hazelcast.
The class PacketEncoderTest, method whenPacketFullyWritten().
@Test
public void whenPacketFullyWritten() {
    final Packet packet = new Packet(serializationService.toBytes("foobar"));

    ByteBuffer dst = ByteBuffer.allocate(1000);
    upcast(dst).flip();
    PacketSupplier src = new PacketSupplier();
    src.queue.add(packet);
    encoder.dst(dst);
    encoder.src(src);

    HandlerStatus result = encoder.onWrite();

    assertEquals(CLEAN, result);

    // now we read out the dst and check if we can find the written packet
    Packet resultPacket = new PacketIOHelper().readFrom(dst);
    assertEquals(packet, resultPacket);
}
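The upcast(dst).flip() before the buffer is handed to the encoder, together with reading the packet straight back out of dst afterwards, suggests the destination buffer is exchanged in "reading mode" and that the handler compacts it before writing and flips it back when done. Below is a plain java.nio sketch of that discipline, with a hypothetical payload and no PacketEncoder internals.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Sketch of the flip/compact buffer discipline the test appears to rely on:
// the buffer is in "reading mode" between calls to the handler.
class BufferModeSketch {

    public static void main(String[] args) {
        ByteBuffer dst = ByteBuffer.allocate(1000);
        dst.flip();                                           // start in reading mode: nothing to read yet

        // Inside a handler's write step: switch to writing mode, write, switch back.
        dst.compact();                                        // writing mode, preserving any unread bytes
        dst.put("foobar".getBytes(StandardCharsets.UTF_8));   // hypothetical payload
        dst.flip();                                           // back to reading mode for the caller

        // The caller can now read exactly what was written.
        byte[] out = new byte[dst.remaining()];
        dst.get(out);
        System.out.println(new String(out, StandardCharsets.UTF_8));
    }
}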
Use of com.hazelcast.internal.networking.HandlerStatus in project hazelcast by hazelcast.
The class PacketEncoderTest, method whenNotEnoughSpace().
@Test
public void whenNotEnoughSpace() {
    final Packet packet = new Packet(serializationService.toBytes(new byte[2000]));

    ByteBuffer dst = ByteBuffer.allocate(1000);
    upcast(dst).flip();
    PacketSupplier src = new PacketSupplier();
    src.queue.add(packet);
    encoder.dst(dst);
    encoder.src(src);

    HandlerStatus result = encoder.onWrite();

    assertEquals(DIRTY, result);
}
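DIRTY here tells the caller that the encoder still holds unwritten data, so the caller is expected to drain dst (typically to the socket) and invoke the write step again. A hedged sketch of that retry loop, assuming a hypothetical WriteStep stand-in rather than the real outbound handler:

import java.nio.ByteBuffer;

// Sketch of how a caller reacts to DIRTY: drain dst, then retry the write step
// until it reports CLEAN. WriteStep is a hypothetical stand-in for an outbound handler.
class DirtyRetrySketch {

    enum Status { CLEAN, DIRTY }

    interface WriteStep {
        Status onWrite(ByteBuffer dst);
    }

    static void writeFully(WriteStep step, ByteBuffer dst) {
        Status status;
        do {
            dst.compact();                 // writing mode
            status = step.onWrite(dst);
            dst.flip();                    // reading mode
            drainToSink(dst);              // e.g. socketChannel.write(dst) in a real pipeline
        } while (status == Status.DIRTY);  // DIRTY means: there is more to write, call again
    }

    private static void drainToSink(ByteBuffer dst) {
        dst.position(dst.limit());         // pretend everything was flushed
    }
}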
Use of com.hazelcast.internal.networking.HandlerStatus in project hazelcast by hazelcast.
The class ClientMessageSplitAndBuildTest, method splitAndBuild_multipleMessages().
@Test
public void splitAndBuild_multipleMessages() {
    Queue<ClientMessage> outputQueue = new ConcurrentLinkedQueue<>();
    List<ClientMessage> fragments1 = getFragments(128, clientMessage1);
    List<ClientMessage> fragments2 = getFragments(128, clientMessage2);
    Iterator<ClientMessage> iterator = fragments2.iterator();
    for (ClientMessage fragment : fragments1) {
        outputQueue.offer(fragment);
        outputQueue.offer(iterator.next());
    }

    ClientMessageEncoder encoder = new ClientMessageEncoder();
    encoder.src(outputQueue::poll);

    ByteBuffer buffer = ByteBuffer.allocate(100000);
    upcast(buffer).flip();
    encoder.dst(buffer);
    HandlerStatus result = encoder.onWrite();
    Assert.assertEquals(CLEAN, result);

    Queue<ClientMessage> inputQueue = new ConcurrentLinkedQueue<>();
    ClientMessageDecoder decoder = new ClientMessageDecoder(null, inputQueue::offer, null);
    decoder.setNormalPacketsRead(SwCounter.newSwCounter());

    upcast(buffer).position(buffer.limit());
    decoder.src(buffer);
    decoder.onRead();

    ClientMessage actualMessage1 = inputQueue.poll();
    ClientMessage actualMessage2 = inputQueue.poll();
    assertMessageEquals(clientMessage1, actualMessage1);
    assertMessageEquals(clientMessage2, actualMessage2);
    assertEquals(0, decoder.builderBySessionIdMap.size());
}
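The fragments of the two messages are interleaved on the wire, yet both messages come back intact and builderBySessionIdMap ends up empty, which implies the decoder groups fragments by a per-message id and discards the builder once the last fragment arrives. Here is a self-contained sketch of that reassembly idea, using a hypothetical Fragment record rather than the real ClientMessage framing.

import java.io.ByteArrayOutputStream;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Sketch of reassembling interleaved fragments keyed by a per-message id,
// analogous to the decoder's builderBySessionIdMap bookkeeping.
class FragmentReassemblySketch {

    // Hypothetical fragment: id identifies the original message, last marks the final piece.
    record Fragment(long id, byte[] payload, boolean last) { }

    private final Map<Long, ByteArrayOutputStream> buildersById = new HashMap<>();
    private final Consumer<byte[]> onMessage;

    FragmentReassemblySketch(Consumer<byte[]> onMessage) {
        this.onMessage = onMessage;
    }

    void accept(Fragment fragment) {
        ByteArrayOutputStream builder =
                buildersById.computeIfAbsent(fragment.id(), id -> new ByteArrayOutputStream());
        builder.writeBytes(fragment.payload());
        if (fragment.last()) {
            buildersById.remove(fragment.id());   // map is empty again once every message completes
            onMessage.accept(builder.toByteArray());
        }
    }
}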