Example usage of com.datatorrent.stram.tuple.Tuple in the apache/apex-core project.
The following snippet is the verify() method of the SocketStreamTest class.
@After
public void verify() throws InterruptedException {
// Drain the reservoir until an end-window tuple arrives, bounded at 100 attempts
// (each empty sweep backs off for 5 ms before retrying).
int attempts = 100;
while (attempts-- > 0) {
final Tuple tuple = reservoir.sweep();
if (tuple == null) {
sleep(5);
} else {
reservoir.remove();
if (tuple instanceof EndWindowTuple) {
break;
}
}
}
// Tear down both socket streams before asserting on the received count.
eventloop.disconnect(oss);
eventloop.disconnect(iss);
Assert.assertEquals("Received messages", 1, messageCount.get());
}
Example usage of com.datatorrent.stram.tuple.Tuple in the apache/apex-core project.
The following snippet is the test() method of the InlineStreamTest class.
// Pushes one begin-window tuple, 5000 integer payloads and one end-window tuple
// through node1 -> InlineStream -> node2, then verifies every payload arrived
// in order and that both operator threads shut down cleanly.
@Test
@SuppressWarnings("SleepWhileInLoop")
public void test() throws Exception {
final int totalTupleCount = 5000;
// Two pass-through operators wired back to back through an InlineStream.
final PassThroughNode<Object> operator1 = new PassThroughNode<>();
final GenericNode node1 = new GenericNode(operator1, new OperatorContext(1, "operator1", new DefaultAttributeMap(), null));
node1.setId(1);
operator1.setup(node1.context);
final PassThroughNode<Object> operator2 = new PassThroughNode<>();
final GenericNode node2 = new GenericNode(operator2, new OperatorContext(2, "operator2", new DefaultAttributeMap(), null));
node2.setId(2);
operator2.setup(node2.context);
StreamContext streamContext = new StreamContext("node1->node2");
final InlineStream stream = new InlineStream(1024);
stream.setup(streamContext);
node1.connectOutputPort("output", stream);
node2.connectInputPort("input", stream.getReservoir());
prev = null;
// Terminal sink: checks that consecutive payloads differ by exactly 1 and
// notifies the waiting test thread either on an ordering violation or once
// the last expected payload (totalTupleCount - 1) has been seen.
Sink<Object> sink = new Sink<Object>() {
@Override
public void put(Object payload) {
// Control tuples (begin/end window etc.) are not part of the ordering check.
if (payload instanceof Tuple) {
return;
}
if (prev == null) {
prev = payload;
} else {
// Out-of-order delivery: wake the test thread early so the assertions fail.
if (Integer.valueOf(payload.toString()) - Integer.valueOf(prev.toString()) != 1) {
synchronized (InlineStreamTest.this) {
InlineStreamTest.this.notify();
}
}
prev = payload;
}
// All payloads received: wake the test thread to run its assertions.
if (Integer.valueOf(prev.toString()) == totalTupleCount - 1) {
synchronized (InlineStreamTest.this) {
InlineStreamTest.this.notify();
}
}
}
@Override
public int getCount(boolean reset) {
return 0;
}
};
node2.connectOutputPort("output", sink);
AbstractReservoir reservoir1 = AbstractReservoir.newReservoir("input", 1024 * 5);
node1.connectInputPort("input", reservoir1);
// Start both operator threads; launchNodeThread registers each in activeNodes.
Map<Integer, Node<?>> activeNodes = new ConcurrentHashMap<>();
launchNodeThread(node1, activeNodes);
launchNodeThread(node2, activeNodes);
stream.activate(streamContext);
// Inject a full window: begin, the numbered payloads, end.
reservoir1.put(StramTestSupport.generateBeginWindowTuple("irrelevant", 0));
for (int i = 0; i < totalTupleCount; i++) {
reservoir1.put(i);
}
reservoir1.put(StramTestSupport.generateEndWindowTuple("irrelevant", 0));
// Wait (bounded) for the sink to signal completion or an ordering violation.
synchronized (this) {
this.wait(200);
}
// prev holds the last in-order payload; it must be totalTupleCount - 1.
Assert.assertNotNull(prev);
Assert.assertEquals("processing complete", totalTupleCount, Integer.valueOf(prev.toString()) + 1);
Assert.assertEquals("active operators", 2, activeNodes.size());
// Wait up to 1 s for the inline stream's reservoir to fully drain.
WaitCondition c = new WaitCondition() {
@Override
public boolean isComplete() {
final SweepableReservoir reservoir = stream.getReservoir();
logger.debug("stream {} empty {}, size {}", stream, reservoir.isEmpty(), reservoir.size(false));
return reservoir.isEmpty();
}
};
Assert.assertTrue("operator should finish processing all events within 1 second", StramTestSupport.awaitCompletion(c, 1000));
// Orderly shutdown: deactivate stream, stop nodes, then poll (up to ~200 ms)
// for the node threads to deregister themselves from activeNodes.
stream.deactivate();
for (Node<?> node : activeNodes.values()) {
node.shutdown();
}
for (int i = 0; i < 10; i++) {
Thread.sleep(20);
if (activeNodes.isEmpty()) {
break;
}
}
stream.teardown();
operator2.teardown();
operator1.teardown();
Assert.assertEquals("active operators", 0, activeNodes.size());
}
Example usage of com.datatorrent.stram.tuple.Tuple in the apache/apex-core project.
The following snippet is the put() method of the FastPublisher class.
/**
 * Serializes a tuple into the circular array of write buffers as a
 * length-prefixed message and arms the selector for writing.
 *
 * Control tuples ({@code instanceof Tuple}) are converted to their
 * buffer-server packet form and copied in behind a 2-byte little-endian
 * size prefix, rolling over to the next buffer whenever the current one
 * fills. Payload tuples are serialized in place (via writeClassAndObject)
 * after space is reserved for a 7-byte header: 2-byte size, 1-byte message
 * type, 4-byte tuple hashcode; the header is back-patched afterwards,
 * including the cases where it straddles a buffer boundary.
 * The hashcode presumably drives downstream partitioning — TODO confirm.
 */
@Override
@SuppressWarnings("SleepWhileInLoop")
public void put(Object tuple) {
if (tuple instanceof Tuple) {
// --- Control tuple path: pre-serialized packet bytes. ---
final Tuple t = (Tuple) tuple;
byte[] array;
switch(t.getType()) {
case CHECKPOINT:
// Reuse the window-id packet layout, then overwrite the type byte.
array = WindowIdTuple.getSerializedTuple((int) t.getWindowId());
array[0] = MessageType.CHECKPOINT_VALUE;
break;
case BEGIN_WINDOW:
array = BeginWindowTuple.getSerializedTuple((int) t.getWindowId());
break;
case END_WINDOW:
array = EndWindowTuple.getSerializedTuple((int) t.getWindowId());
break;
case CUSTOM_CONTROL:
// NOTE(review): array is left null here, so array.length below will
// throw NullPointerException for CUSTOM_CONTROL tuples — unimplemented.
array = null;
// TODO implement
break;
case END_STREAM:
array = EndStreamTuple.getSerializedTuple((int) t.getWindowId());
break;
case RESET_WINDOW:
com.datatorrent.stram.tuple.ResetWindowTuple rwt = (com.datatorrent.stram.tuple.ResetWindowTuple) t;
array = ResetWindowTuple.getSerializedTuple(rwt.getBaseSeconds(), rwt.getIntervalMillis());
break;
default:
throw new UnsupportedOperationException("this data type is not handled in the stream");
}
int size = array.length;
// Write the 2-byte little-endian size prefix, rolling to the next buffer
// if the current one runs out mid-prefix. Before advancing, the finished
// buffer's read limit is published under the readBuffers lock.
if (writeBuffer.hasRemaining()) {
writeBuffer.put((byte) size);
if (writeBuffer.hasRemaining()) {
writeBuffer.put((byte) (size >> 8));
} else {
synchronized (readBuffers) {
readBuffers[writeIndex].limit(BUFFER_CAPACITY);
}
advanceWriteBuffer();
writeBuffer.put((byte) (size >> 8));
}
} else {
synchronized (readBuffers) {
readBuffers[writeIndex].limit(BUFFER_CAPACITY);
}
advanceWriteBuffer();
writeBuffer.put((byte) size);
writeBuffer.put((byte) (size >> 8));
}
// Copy the packet body, spilling across as many buffers as needed.
int remaining = writeBuffer.remaining();
if (remaining < size) {
int offset = 0;
do {
writeBuffer.put(array, offset, remaining);
offset += remaining;
size -= remaining;
synchronized (readBuffers) {
readBuffers[writeIndex].limit(BUFFER_CAPACITY);
}
advanceWriteBuffer();
remaining = writeBuffer.remaining();
if (size <= remaining) {
writeBuffer.put(array, offset, size);
break;
}
} while (true);
} else {
writeBuffer.put(array);
}
// Publish the new data to the reader by extending the read limit.
synchronized (readBuffers) {
readBuffers[writeIndex].limit(writeBuffer.position());
}
} else {
// --- Payload tuple path: serialize in place, back-patch the header. ---
count++;
int hashcode = tuple.hashCode();
// Remember where we started so the header can be patched in afterwards.
int wi = writeIndex;
int position = writeBuffer.position();
// Reserve 7 bytes of header before the serialized body.
int newPosition = position + 2 + /* for short size */
1 + /* for data type */
4;
if (newPosition > BUFFER_CAPACITY) {
// Header itself crosses the buffer boundary; continue in the next buffer.
writeBuffer.position(BUFFER_CAPACITY);
advanceWriteBuffer();
writeBuffer.position(newPosition - BUFFER_CAPACITY);
} else {
writeBuffer.position(newPosition);
}
// Serialize the tuple body; this may advance writeIndex past wi.
writeClassAndObject(output, tuple);
int size;
if (wi == writeIndex) {
// Entire message landed in one buffer: patch header at the start offset.
size = writeBuffer.position() - position - 2;
assert (size <= Short.MAX_VALUE);
writeBuffer.put(position++, (byte) size);
writeBuffer.put(position++, (byte) (size >> 8));
writeBuffer.put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffer.put(position++, (byte) hashcode);
writeBuffer.put(position++, (byte) (hashcode >> 8));
writeBuffer.put(position++, (byte) (hashcode >> 16));
writeBuffer.put(position, (byte) (hashcode >> 24));
synchronized (readBuffers[wi]) {
readBuffers[wi].limit(writeBuffer.position());
}
} else {
// Message spilled across buffers: total size is the tail of the first
// buffer plus full intermediate buffers plus the head of the last one.
size = BUFFER_CAPACITY - position - 2 + writeBuffer.position();
int index = writeIndex;
synchronized (readBuffers[index]) {
readBuffers[index].position(0);
readBuffers[index].limit(writeBuffer.position());
}
// Walk backwards from the current buffer to wi, resetting each
// intermediate buffer for reading and accumulating its size.
do {
if (index == 0) {
index = lastIndex;
} else {
index--;
}
if (index == wi) {
break;
}
synchronized (readBuffers[index]) {
readBuffers[index].position(0);
readBuffers[index].limit(BUFFER_CAPACITY);
}
size += BUFFER_CAPACITY;
} while (true);
assert (size <= Short.MAX_VALUE);
index = wi;
// The 7 header bytes may straddle the boundary between buffer wi and the
// next one; each case handles the split falling after a different byte.
switch(position) {
case BUFFER_CAPACITY:
// Header starts exactly at the boundary: all 7 bytes go in the next buffer.
position = 0;
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 1:
// 1 byte fits in buffer wi, remaining 6 in the next.
writeBuffers[wi].put(position, (byte) size);
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 2:
// 2 bytes fit in buffer wi, remaining 5 in the next.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position, (byte) (size >> 8));
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 3:
// 3 bytes fit in buffer wi, remaining 4 in the next.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 4:
// 4 bytes fit in buffer wi, remaining 3 in the next.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position, (byte) hashcode);
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 5:
// 5 bytes fit in buffer wi, remaining 2 in the next.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position, (byte) (hashcode >> 8));
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
case BUFFER_CAPACITY - 6:
// 6 bytes fit in buffer wi, last byte in the next.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position, (byte) (hashcode >> 16));
if (wi == lastIndex) {
wi = 0;
} else {
wi++;
}
position = 0;
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
default:
// Whole 7-byte header fits in buffer wi.
writeBuffers[wi].put(position++, (byte) size);
writeBuffers[wi].put(position++, (byte) (size >> 8));
writeBuffers[wi].put(position++, com.datatorrent.bufferserver.packet.MessageType.PAYLOAD_VALUE);
writeBuffers[wi].put(position++, (byte) hashcode);
writeBuffers[wi].put(position++, (byte) (hashcode >> 8));
writeBuffers[wi].put(position++, (byte) (hashcode >> 16));
writeBuffers[wi].put(position, (byte) (hashcode >> 24));
break;
}
// Publish the first buffer of the spilled message as fully readable.
synchronized (readBuffers[index]) {
readBuffers[index].limit(BUFFER_CAPACITY);
}
}
}
// Arm OP_WRITE and wake the selector so the new data gets flushed.
if (!write) {
key.interestOps(key.interestOps() | SelectionKey.OP_WRITE);
write = true;
key.selector().wakeup();
}
}
— end of aggregated Tuple usage examples —