Example usage of com.datatorrent.api.Sink in the Apache apex-malhar project, from class EventIncrementerTest, method testNodeProcessing.
/**
 * Verifies EventIncrementer processing over two windows: window one feeds
 * the same seed tuple repeatedly to create keys "x" and "y", window two
 * feeds per-key increment tuples. Emitted counts and key/value pairs are
 * logged for inspection rather than asserted.
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testNodeProcessing() throws Exception {
EventIncrementer oper = new EventIncrementer();
// Collecting sinks attached to the operator's two output ports.
DataSink dataSink = new DataSink();
CountSink countSink = new CountSink();
oper.data.setSink(dataSink);
oper.count.setSink(countSink);
// Raw input sinks used to push tuples directly into the operator.
Sink seedSink = oper.seed.getSink();
Sink incrSink = oper.increment.getSink();
// Two keys, each bounded to the value range [1.0, 100.0].
ArrayList<String> keys = new ArrayList<String>(2);
ArrayList<Double> low = new ArrayList<Double>(2);
ArrayList<Double> high = new ArrayList<Double>(2);
keys.add("x");
keys.add("y");
low.add(1.0);
low.add(1.0);
high.add(100.0);
high.add(100.0);
oper.setKeylimits(keys, low, high);
oper.setDelta(1);
oper.beginWindow(0);
// Seed tuple: one id ("a") mapped to initial values for the two keys.
HashMap<String, Object> stuple = new HashMap<String, Object>(1);
// int numtuples = 100000000; // For benchmarking
int numtuples = 1000;
String seed1 = "a";
ArrayList val = new ArrayList();
val.add(10);
val.add(20);
stuple.put(seed1, val);
// Window 1: push the identical seed tuple numtuples times.
for (int i = 0; i < numtuples; i++) {
seedSink.put(stuple);
}
oper.endWindow();
LOG.debug(String.format("\n*************************\nEmitted %d tuples, Processed %d tuples, Received %d tuples\n******************\n", numtuples, oper.tuple_count, dataSink.count));
for (Map.Entry<String, String> e : dataSink.collectedTuples.entrySet()) {
LOG.debug(String.format("Got key (%s) and value (%s)", e.getKey(), e.getValue()));
}
// NOTE(review): the second window reuses window id 0; if the operator keys
// any state off the window id this looks like a copy-paste slip — confirm
// whether beginWindow(1) was intended.
oper.beginWindow(0);
// Increment tuples: id "a" gets +10 on key "x" and +10 on key "y".
HashMap<String, Object> ixtuple = new HashMap<String, Object>(1);
HashMap<String, Integer> ixval = new HashMap<String, Integer>(1);
ixval.put("x", 10);
ixtuple.put("a", ixval);
HashMap<String, Object> iytuple = new HashMap<String, Object>(1);
HashMap<String, Integer> iyval = new HashMap<String, Integer>(1);
iyval.put("y", 10);
iytuple.put("a", iyval);
// Window 2: alternate the two increment tuples, numtuples each.
for (int i = 0; i < numtuples; i++) {
incrSink.put(ixtuple);
incrSink.put(iytuple);
}
oper.endWindow();
LOG.debug(String.format("\n*************************\nEmitted %d tuples, Processed %d tuples, Received %d tuples\n******************\n", numtuples * 2, oper.tuple_count, countSink.count));
// NOTE(review): this loop re-reads dataSink after the increment window while
// the log line above reports countSink.count — verify which sink window 2's
// output is expected on; iterating countSink's tuples may have been intended.
for (Map.Entry<String, String> e : dataSink.collectedTuples.entrySet()) {
LOG.debug(String.format("Got key (%s) and value (%s)", e.getKey(), e.getValue()));
}
}
Example usage of com.datatorrent.api.Sink in the Apache apex-malhar project, from class ElasticSearchOperatorTest, method readFromES.
/**
 * Reads back documents from Elasticsearch that were indexed after the test
 * started, and returns their ids.
 *
 * @param writtenTupleIDs ids of the tuples previously written (not consumed
 *        here; kept for symmetry with the write path)
 * @param testStartTime lower bound of the POST_DATE range filter
 * @return ids of all tuples the input operator emitted in one window
 */
private List<String> readFromES(List<String> writtenTupleIDs, final long testStartTime) {
ElasticSearchMapInputOperator<Map<String, Object>> operator = new ElasticSearchMapInputOperator<Map<String, Object>>() {
/**
 * Restricts the search to documents whose POST_DATE lies between the
 * start of the test and "now", capping the result size at 15.
 *
 * @see org.apache.apex.malhar.contrib.elasticsearch.ElasticSearchMapInputOperator#getSearchRequestBuilder()
 */
@Override
protected SearchRequestBuilder getSearchRequestBuilder() {
long now = System.currentTimeMillis();
SearchRequestBuilder request = searchRequestBuilder.setPostFilter(FilterBuilders.rangeFilter(POST_DATE).from(testStartTime).to(now));
return request.setSize(15).setExplain(false);
}
};
operator.setIndexName(INDEX_NAME);
operator.setType(TYPE);
operator.setStore(createStore());
final List<String> idsRead = new ArrayList<String>();
// Collecting sink: records the ID_FIELD of every emitted tuple.
Sink sink = new Sink<Map<String, Object>>() {
@Override
public void put(Map<String, Object> tuple) {
idsRead.add(tuple.get(ID_FIELD).toString());
}
@Override
public int getCount(boolean reset) {
return 0;
}
};
operator.outputPort.setSink(sink);
operator.setup(null);
// Run exactly one processing window (window id 1).
operator.beginWindow(1);
operator.emitTuples();
operator.endWindow();
operator.teardown();
return idsRead;
}
Example usage of com.datatorrent.api.Sink in the Apache apex-core project, from class FastStreamTest, method testBufferServerStream.
/**
 * Exercises the buffer-server stream path end to end: a FastPublisher sends
 * one data tuple inside a single window, and the FastSubscriber's reservoir
 * must deliver exactly that one tuple to the counting sink.
 *
 * @throws Exception if setup or sleeping is interrupted
 */
@Test
@SuppressWarnings({ "SleepWhileInLoop" })
public void testBufferServerStream() throws Exception {
final StreamCodec<Object> serde = new DefaultStatefulStreamCodec<>();
final AtomicInteger messageCount = new AtomicInteger();
// Counts every data tuple delivered through the subscriber's reservoir.
Sink<Object> sink = new Sink<Object>() {
@Override
public void put(Object tuple) {
logger.debug("received: " + tuple);
messageCount.incrementAndGet();
}
@Override
public int getCount(boolean reset) {
// Not exercised by this test.
throw new UnsupportedOperationException("Not supported yet.");
}
};
String streamName = "streamName";
String upstreamNodeId = "upstreamNodeId";
String downstreamNodeId = "downStreamNodeId";
// Subscriber-side (input) stream context pointing at the buffer server.
StreamContext issContext = new StreamContext(streamName);
issContext.setSourceId(upstreamNodeId);
issContext.setSinkId(downstreamNodeId);
issContext.setFinishedWindowId(-1);
issContext.setBufferServerAddress(InetSocketAddress.createUnresolved("localhost", bufferServerPort));
issContext.put(StreamContext.CODEC, serde);
issContext.put(StreamContext.EVENT_LOOP, eventloop);
FastSubscriber subscriber = new FastSubscriber(downstreamNodeId, 1024);
subscriber.setup(issContext);
SweepableReservoir reservoir = subscriber.acquireReservoir("testReservoir", 1);
reservoir.setSink(sink);
// NOTE(review): ossContext is fully configured but never passed to any
// component — the publisher below uses publisherContext instead. Looks like
// leftover setup; confirm before removing.
StreamContext ossContext = new StreamContext(streamName);
ossContext.setSourceId(upstreamNodeId);
ossContext.setSinkId(downstreamNodeId);
ossContext.setFinishedWindowId(-1);
ossContext.setBufferServerAddress(InetSocketAddress.createUnresolved("localhost", bufferServerPort));
ossContext.put(StreamContext.CODEC, serde);
ossContext.put(StreamContext.EVENT_LOOP, eventloop);
FastPublisher publisher = new FastPublisher(upstreamNodeId, 8);
// Publisher-side (output) stream context targeting the same buffer server.
StreamContext publisherContext = new StreamContext(streamName);
publisherContext.setSourceId(upstreamNodeId);
publisherContext.setSinkId(downstreamNodeId);
publisherContext.setBufferServerAddress(InetSocketAddress.createUnresolved("localhost", bufferServerPort));
publisherContext.put(StreamContext.CODEC, serde);
publisherContext.put(StreamContext.EVENT_LOOP, eventloop);
publisher.setup(publisherContext);
subscriber.activate(issContext);
LOG.debug("input stream activated");
publisher.activate(publisherContext);
LOG.debug("output stream activated");
LOG.debug("Sending hello message");
// One complete window (begin / data / end) containing a single data tuple.
publisher.put(StramTestSupport.generateBeginWindowTuple(upstreamNodeId, 0));
publisher.put(StramTestSupport.generateTuple("hello", 0));
publisher.put(StramTestSupport.generateEndWindowTuple(upstreamNodeId, 0));
// it's a spurious tuple, presence of it should not affect the outcome of the test.
publisher.put(StramTestSupport.generateBeginWindowTuple(upstreamNodeId, 1));
// Poll the reservoir (up to ~500 ms) until the END_WINDOW control tuple of
// window 0 arrives; data tuples are routed to the sink by sweep().
for (int i = 0; i < 100; i++) {
Tuple t = reservoir.sweep();
if (t == null) {
sleep(5);
continue;
}
reservoir.remove();
if (t instanceof EndWindowTuple) {
break;
}
}
eventloop.disconnect(publisher);
eventloop.disconnect(subscriber);
// Exactly the single "hello" tuple must have reached the sink.
Assert.assertEquals("Received messages", 1, messageCount.get());
}
Example usage of com.datatorrent.api.Sink in the Apache apex-core project, from class WindowGeneratorTest, method testWindowGen.
/**
 * Runs a WindowGenerator for ~200 ms and verifies the stream of control
 * tuples it produced: begin/end windows must pair up (leaving exactly one
 * window open at the end) and the begin-window count must match the elapsed
 * time divided by the window width.
 */
@Test
public void testWindowGen() throws Exception {
final AtomicLong currentWindow = new AtomicLong();
final AtomicInteger beginWindowCount = new AtomicInteger();
final AtomicInteger endWindowCount = new AtomicInteger();
// XOR of all begin/end window ids: paired begin/end cancel out, so after the
// loop this holds the id of the single window that was opened but not closed.
final AtomicLong windowXor = new AtomicLong();
// The generator should emit only control tuples; any data payload is a bug.
Sink<Object> s = new Sink<Object>() {
@Override
public int getCount(boolean reset) {
return 0;
}
@Override
public void put(Object payload) {
logger.debug("unexpected payload {}", payload);
}
};
// NOTE(review): stpe is used only as a clock source for firstWindowMillis;
// a second executor is created for the generator below and stpe is never
// shut down — confirm whether this is intentional.
ScheduledThreadPoolExecutor stpe = new ScheduledThreadPoolExecutor(1, "WindowGenerator");
int windowWidth = 200;
long firstWindowMillis = stpe.getCurrentTimeMillis();
// Align the first window to a whole-second boundary.
firstWindowMillis -= firstWindowMillis % 1000L;
WindowGenerator wg = new WindowGenerator(new ScheduledThreadPoolExecutor(1, "WindowGenerator"), WindowGenerator.MAX_WINDOW_ID + 1024);
wg.setResetWindow(firstWindowMillis);
wg.setFirstWindow(firstWindowMillis);
wg.setWindowWidth(windowWidth);
SweepableReservoir reservoir = wg.acquireReservoir("GeneratorTester", windowWidth);
reservoir.setSink(s);
wg.activate(null);
// Let the generator produce windows for roughly one window width.
Thread.sleep(200);
wg.deactivate();
// NOTE(review): this first sweep()'s return value is discarded — presumably
// it primes the reservoir before the draining loop; confirm.
reservoir.sweep();
/* just transfer over all the control tuples */
Tuple t;
while ((t = reservoir.sweep()) != null) {
reservoir.remove();
long windowId = t.getWindowId();
switch(t.getType()) {
case BEGIN_WINDOW:
currentWindow.set(windowId);
beginWindowCount.incrementAndGet();
windowXor.set(windowXor.get() ^ windowId);
break;
case END_WINDOW:
endWindowCount.incrementAndGet();
windowXor.set(windowXor.get() ^ windowId);
break;
case RESET_WINDOW:
break;
default:
// Unexpected tuple type; clobber currentWindow so the assert fails.
currentWindow.set(0);
break;
}
}
long lastWindowMillis = System.currentTimeMillis();
// Only the last-opened window may be missing its END_WINDOW.
Assert.assertEquals("only last window open", currentWindow.get(), windowXor.get());
long expectedCnt = (lastWindowMillis - firstWindowMillis) / windowWidth;
Assert.assertTrue("Minimum begin window count", expectedCnt + 1 <= beginWindowCount.get());
// Every window except the still-open last one must have been closed.
Assert.assertEquals("end window count", beginWindowCount.get() - 1, endWindowCount.get());
}
Example usage of com.datatorrent.api.Sink in the Apache apex-core project, from class GenericNodeTest, method testSynchingLogic.
/**
 * Verifies GenericNode's window-synchronization logic with two input ports:
 * a control tuple is forwarded to the output only once the matching tuple
 * has arrived on BOTH reservoirs. Each step feeds a tuple to one reservoir,
 * sleeps to let the node thread run, then asserts the forwarded-tuple count.
 */
@Test
@SuppressWarnings("SleepWhileInLoop")
public void testSynchingLogic() throws InterruptedException {
long sleeptime = 25L;
// Captures every tuple the node forwards to its output port.
final ArrayList<Object> list = new ArrayList<>();
GenericOperator go = new GenericOperator();
final GenericNode gn = new GenericNode(go, new com.datatorrent.stram.engine.OperatorContext(0, "operator", new DefaultAttributeMap(), null));
gn.setId(1);
AbstractReservoir reservoir1 = AbstractReservoir.newReservoir("ip1Res", 1024);
AbstractReservoir reservoir2 = AbstractReservoir.newReservoir("ip2Res", 1024);
Sink<Object> output = new Sink<Object>() {
@Override
public void put(Object tuple) {
list.add(tuple);
}
@Override
public int getCount(boolean reset) {
return 0;
}
};
gn.connectInputPort("ip1", reservoir1);
gn.connectInputPort("ip2", reservoir2);
gn.connectOutputPort("op", output);
gn.firstWindowMillis = 0;
gn.windowWidthMillis = 100;
final AtomicBoolean ab = new AtomicBoolean(false);
// Run the node on its own thread; ab signals the thread has started.
Thread t = new Thread() {
@Override
public void run() {
ab.set(true);
gn.activate();
gn.run();
gn.deactivate();
}
};
t.start();
do {
Thread.sleep(sleeptime);
} while (ab.get() == false);
// BEGIN_WINDOW 1 on port 1 alone is enough to open the window downstream.
Tuple beginWindow1 = new Tuple(MessageType.BEGIN_WINDOW, 0x1L);
reservoir1.add(beginWindow1);
Thread.sleep(sleeptime);
Assert.assertEquals(1, list.size());
// The same BEGIN_WINDOW on port 2 must not be forwarded again.
reservoir2.add(beginWindow1);
Thread.sleep(sleeptime);
Assert.assertEquals(1, list.size());
// END_WINDOW 1 on port 1 only: held back until port 2 also ends window 1.
Tuple endWindow1 = new EndWindowTuple(0x1L);
reservoir1.add(endWindow1);
Thread.sleep(sleeptime);
Assert.assertEquals(1, list.size());
// BEGIN_WINDOW 2 on port 1 is also held while window 1 is still open.
Tuple beginWindow2 = new Tuple(MessageType.BEGIN_WINDOW, 0x2L);
reservoir1.add(beginWindow2);
Thread.sleep(sleeptime);
Assert.assertEquals(1, list.size());
// END_WINDOW 1 on port 2 closes window 1: END_WINDOW 1 and the buffered
// BEGIN_WINDOW 2 are both released (1 -> 3 forwarded tuples).
reservoir2.add(endWindow1);
Thread.sleep(sleeptime);
Assert.assertEquals(3, list.size());
reservoir2.add(beginWindow2);
Thread.sleep(sleeptime);
Assert.assertEquals(3, list.size());
// END_WINDOW 2 on port 2 alone: still waiting on port 1.
Tuple endWindow2 = new EndWindowTuple(0x2L);
reservoir2.add(endWindow2);
Thread.sleep(sleeptime);
Assert.assertEquals(3, list.size());
// END_WINDOW 2 on port 1 completes window 2 (3 -> 4).
reservoir1.add(endWindow2);
Thread.sleep(sleeptime);
Assert.assertEquals(4, list.size());
// END_STREAM on port 1: port 1 drops out; node keeps running on port 2.
EndStreamTuple est = new EndStreamTuple(0L);
reservoir1.add(est);
Thread.sleep(sleeptime);
Assert.assertEquals(4, list.size());
// With only port 2 active, its window 3 tuples flow through immediately.
Tuple beginWindow3 = new Tuple(MessageType.BEGIN_WINDOW, 0x3L);
reservoir2.add(beginWindow3);
Thread.sleep(sleeptime);
Assert.assertEquals(5, list.size());
Tuple endWindow3 = new EndWindowTuple(0x3L);
reservoir2.add(endWindow3);
Thread.sleep(sleeptime);
Assert.assertEquals(6, list.size());
// Node thread must still be alive until every port has ended its stream.
Assert.assertNotSame(Thread.State.TERMINATED, t.getState());
// END_STREAM on the last port shuts the node down (forwards the tuple too).
reservoir2.add(est);
Thread.sleep(sleeptime);
Assert.assertEquals(7, list.size());
Thread.sleep(sleeptime);
Assert.assertEquals(Thread.State.TERMINATED, t.getState());
}
Aggregations