Example use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar (Apache): class AppDataSnapshotServerPojoTest, method simpleTest.
@Test
public void simpleTest() throws Exception {
  String query = SchemaUtils.jarResourceFileToString("snapshotServerPojoQuery.json");
  String schema = SchemaUtils.jarResourceFileToString("snapshotServerPojoSchema.json");
  TestObjAllTypes testObj = new TestObjAllTypes();
  // Map each snapshot-schema field name to the getter expression that extracts
  // the corresponding value from the test POJO's inner object.
  String[][] fieldGetterPairs = {
    {"stringType", "getInnerObj().stringVal"},
    {"charType", "getInnerObj().charVal"},
    {"booleanType", "getInnerObj().boolVal"},
    {"longType", "getInnerObj().longVal"},
    {"integerType", "getInnerObj().intVal"},
    {"shortType", "getInnerObj().shortVal"},
    {"byteType", "getInnerObj().byteVal"},
    {"floatType", "getInnerObj().floatVal"},
    {"doubleType", "getInnerObj().doubleVal"}
  };
  Map<String, String> fieldToGetter = Maps.newHashMap();
  for (String[] pair : fieldGetterPairs) {
    fieldToGetter.put(pair[0], pair[1]);
  }
  AppDataSnapshotServerPOJO snapshotServer = new AppDataSnapshotServerPOJO();
  snapshotServer.setFieldToGetter(fieldToGetter);
  snapshotServer.setSnapshotSchemaJSON(schema);
  // Capture everything emitted on the query-result port.
  CollectorTestSink<String> resultSink = new CollectorTestSink<String>();
  TestUtils.setSink(snapshotServer.queryResult, resultSink);
  List<Object> inputList = Lists.newArrayList();
  inputList.add(testObj);
  // Drive one window: feed the snapshot data, then the query against it.
  snapshotServer.setup(null);
  snapshotServer.beginWindow(0L);
  snapshotServer.input.put(inputList);
  snapshotServer.query.put(query);
  snapshotServer.endWindow();
  Assert.assertEquals(1, resultSink.collectedTuples.size());
  String serializedResult = resultSink.collectedTuples.get(0);
  // Pull the single data row out of the JSON response and verify each typed
  // field round-tripped through the snapshot server.
  JSONObject row = new JSONObject(serializedResult).getJSONArray(DataQuerySnapshot.FIELD_DATA).getJSONObject(0);
  Assert.assertEquals(testObj.getInnerObj().stringVal, row.get("stringType"));
  Assert.assertEquals(Character.toString(testObj.getInnerObj().charVal), row.get("charType"));
  Assert.assertEquals(Boolean.toString(testObj.getInnerObj().boolVal), Boolean.toString((Boolean) row.get("booleanType")));
  Assert.assertEquals(Long.toString(testObj.getInnerObj().longVal), row.get("longType"));
  Assert.assertEquals(Integer.toString(testObj.getInnerObj().intVal), row.get("integerType"));
  Assert.assertEquals(Short.toString(testObj.getInnerObj().shortVal), row.get("shortType"));
  Assert.assertEquals(Byte.toString(testObj.getInnerObj().byteVal), row.get("byteType"));
  Assert.assertEquals(Float.toString(testObj.getInnerObj().floatVal), row.get("floatType"));
  Assert.assertEquals(Double.toString(testObj.getInnerObj().doubleVal), row.get("doubleType"));
}
Example use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar (Apache): class JdbcPojoPollableOpeartorTest, method testDBPollerExtraField.
@Test
public void testDBPollerExtraField() throws Exception {
  // Seed the table with 10 rows so each of the 2 range partitions reads 5.
  insertEvents(10, true, 0);
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);
  List<FieldInfo> fieldInfos = getFieldInfos();
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  // Explicit column list is the "extra field" scenario under test: the query
  // selects fewer columns than the POJO exposes.
  inputOperator.setColumnsExpression("ID,STARTDATE,STARTTIME,STARTTIMESTAMP");
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(2);
  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions = inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);
  int operatorId = 0;
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    // Fixed misspelled local "parition" -> "partitionInstance".
    JdbcPOJOPollInputOperator partitionInstance = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    partitionInstance.outputPort.setup(tpc);
    partitionInstance.setScheduledExecutorService(mockscheduler);
    partitionInstance.setup(partitioningContext);
    partitionInstance.activate(partitioningContext);
  }
  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();
  // First partition is for range queries, last is for polling queries.
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink1 = new CollectorTestSink<>();
  firstInstance.outputPort.setSink(sink1);
  firstInstance.beginWindow(0);
  // A range partition closes its prepared statement after its one-shot poll.
  Assert.assertFalse(firstInstance.ps.isClosed());
  firstInstance.pollRecords();
  Assert.assertTrue(firstInstance.ps.isClosed());
  firstInstance.emitTuples();
  firstInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink1.collectedTuples.size());
  for (Object tuple : sink1.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    // Fixed misleading assertion message (was "date").
    Assert.assertTrue("id in first partition range", pojoEvent.getId() < 5);
  }
  JdbcPOJOPollInputOperator secondInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink2 = new CollectorTestSink<>();
  secondInstance.outputPort.setSink(sink2);
  secondInstance.beginWindow(0);
  secondInstance.pollRecords();
  secondInstance.emitTuples();
  secondInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink2.collectedTuples.size());
  for (Object tuple : sink2.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    // Fixed misleading assertion message (was "date").
    Assert.assertTrue("id in second partition range", pojoEvent.getId() < 10 && pojoEvent.getId() >= 5);
  }
  // Rows inserted after partitioning are only visible to the poller partition.
  insertEvents(4, false, 10);
  JdbcPOJOPollInputOperator thirdInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
  thirdInstance.outputPort.setSink(sink3);
  thirdInstance.beginWindow(0);
  thirdInstance.pollRecords();
  thirdInstance.emitTuples();
  thirdInstance.endWindow();
  Assert.assertEquals("rows from db", 4, sink3.collectedTuples.size());
}
Example use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar (Apache): class JdbcPojoPollableOpeartorTest, method testDBPoller.
@Test
public void testDBPoller() throws Exception {
  // Seed the table with 10 rows so each of the 2 range partitions reads 5.
  insertEvents(10, true, 0);
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);
  List<FieldInfo> fieldInfos = getFieldInfos();
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(2);
  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions = inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);
  int operatorId = 0;
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    // Fixed misspelled local "parition" -> "partitionInstance".
    JdbcPOJOPollInputOperator partitionInstance = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    partitionInstance.outputPort.setup(tpc);
    partitionInstance.setScheduledExecutorService(mockscheduler);
    partitionInstance.setup(partitioningContext);
    partitionInstance.activate(partitioningContext);
  }
  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();
  // First partition is for range queries, last is for polling queries.
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink1 = new CollectorTestSink<>();
  firstInstance.outputPort.setSink(sink1);
  firstInstance.beginWindow(0);
  firstInstance.pollRecords();
  try {
    // A non-poller (range) partition closes its connection after the first
    // poll, so a second poll must fail.
    firstInstance.pollRecords();
    Assert.fail("expected closed connection");
  } catch (Exception e) {
    // expected: statement/connection already closed
  }
  firstInstance.emitTuples();
  firstInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink1.collectedTuples.size());
  for (Object tuple : sink1.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    // Fixed misleading assertion message (was "date").
    Assert.assertTrue("id in first partition range", pojoEvent.getId() < 5);
  }
  JdbcPOJOPollInputOperator secondInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink2 = new CollectorTestSink<>();
  secondInstance.outputPort.setSink(sink2);
  secondInstance.beginWindow(0);
  secondInstance.pollRecords();
  secondInstance.emitTuples();
  secondInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink2.collectedTuples.size());
  for (Object tuple : sink2.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    // Fixed misleading assertion message (was "date").
    Assert.assertTrue("id in second partition range", pojoEvent.getId() < 10 && pojoEvent.getId() >= 5);
  }
  // Rows inserted after partitioning are only visible to the poller partition,
  // which may be polled repeatedly without its connection closing.
  insertEvents(4, false, 10);
  JdbcPOJOPollInputOperator thirdInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
  thirdInstance.outputPort.setSink(sink3);
  thirdInstance.beginWindow(0);
  thirdInstance.pollRecords();
  thirdInstance.pollRecords();
  thirdInstance.emitTuples();
  thirdInstance.endWindow();
  Assert.assertEquals("rows from db", 4, sink3.collectedTuples.size());
}
Example use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar (Apache): class DeduperTimeBasedPOJOImplTest, method testDedup.
@Test
public void testDedup() {
  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributes = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributes.put(DAG.APPLICATION_ID, APP_ID);
  attributes.put(DAG.APPLICATION_PATH, applicationPath);
  attributes.put(DAG.InputPortMeta.TUPLE_CLASS, TestPojo.class);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributes);
  deduper.setup(context);
  deduper.input.setup(new PortContext(attributes, context));
  deduper.activate(context);
  // Sinks for the three output ports: unique, duplicate, expired.
  CollectorTestSink<TestPojo> uniqueSink = new CollectorTestSink<TestPojo>();
  TestUtils.setSink(deduper.unique, uniqueSink);
  CollectorTestSink<TestPojo> duplicateSink = new CollectorTestSink<TestPojo>();
  TestUtils.setSink(deduper.duplicate, duplicateSink);
  CollectorTestSink<TestPojo> expiredSink = new CollectorTestSink<TestPojo>();
  TestUtils.setSink(deduper.expired, expiredSink);
  deduper.beginWindow(0);
  long millis = System.currentTimeMillis();
  // First batch: ids 0..99, all unique.
  for (int i = 0; i < 100; i++) {
    TestPojo pojo = new TestPojo(i, new Date(millis + i));
    deduper.input.process(pojo);
  }
  // A tuple stamped a minute in the past should land on the expired port.
  TestPojo expiredPojo = new TestPojo(100, new Date(millis - 1000 * 60));
  deduper.input.process(expiredPojo);
  // Second batch: ids 90..199 — ids 90..99 repeat and are duplicates,
  // 100..199 are new uniques (id 100 here has a fresh timestamp).
  for (int i = 90; i < 200; i++) {
    TestPojo pojo = new TestPojo(i, new Date(millis + i));
    deduper.input.process(pojo);
  }
  deduper.handleIdleTime();
  deduper.endWindow();
  // Use assertEquals (not assertTrue on ==) so a failure reports
  // expected vs. actual counts.
  Assert.assertEquals(200, uniqueSink.collectedTuples.size());
  Assert.assertEquals(10, duplicateSink.collectedTuples.size());
  Assert.assertEquals(1, expiredSink.collectedTuples.size());
  deduper.teardown();
}
Example use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar (Apache): class PubSubWebSocketOperatorTest, method testPubSubWebSocket.
@Test
@SuppressWarnings("SleepWhileInLoop")
public void testPubSubWebSocket() throws Exception {
  // Start an embedded Jetty server on an ephemeral port (0) hosting the
  // sample pub/sub servlet.
  Server server = new Server(0);
  SamplePubSubWebSocketServlet servlet = new SamplePubSubWebSocketServlet();
  ServletHolder sh = new ServletHolder(servlet);
  ServletContextHandler contextHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS);
  contextHandler.addServlet(sh, "/pubsub");
  contextHandler.addServlet(sh, "/*");
  server.start();
  Connector[] connector = server.getConnectors();
  URI uri = PubSubHelper.getURI("localhost:" + connector[0].getLocalPort());
  // Output operator publishes to "testTopic"; input operator subscribes to it.
  PubSubWebSocketOutputOperator<Object> outputOperator = new PubSubWebSocketOutputOperator<Object>();
  outputOperator.setUri(uri);
  outputOperator.setTopic("testTopic");
  PubSubWebSocketInputOperator<Object> inputOperator = new PubSubWebSocketInputOperator<Object>();
  inputOperator.setUri(uri);
  inputOperator.setTopic("testTopic");
  CollectorTestSink<Object> sink = new CollectorTestSink<Object>();
  inputOperator.outputPort.setSink(sink);
  inputOperator.setup(null);
  outputOperator.setup(null);
  inputOperator.activate(null);
  // Wait (up to 3 s) for the subscriber to register before publishing.
  long timeout = System.currentTimeMillis() + 3000;
  while (!servlet.hasSubscriber()) {
    Thread.sleep(10);
    if (System.currentTimeMillis() > timeout) {
      throw new TimeoutException("No subscribers connected after 3 seconds");
    }
  }
  inputOperator.beginWindow(1000);
  outputOperator.beginWindow(1000);
  // Publish one map tuple and one string tuple.
  Map<String, String> data = new HashMap<String, String>();
  data.put("hello", "world");
  outputOperator.input.process(data);
  String stringData = "StringMessage";
  outputOperator.input.process(stringData);
  // Poll the input operator (up to 2 s) until both tuples arrive.
  int timeoutMillis = 2000;
  while (sink.collectedTuples.size() < 2 && timeoutMillis > 0) {
    inputOperator.emitTuples();
    timeoutMillis -= 20;
    Thread.sleep(20);
  }
  outputOperator.endWindow();
  inputOperator.endWindow();
  Assert.assertTrue("tuples emitted", sink.collectedTuples.size() > 1);
  @SuppressWarnings("unchecked") Map<String, String> tuple = (Map<String, String>) sink.collectedTuples.get(0);
  Assert.assertEquals("Expects {\"hello\":\"world\"} as data", "world", tuple.get("hello"));
  String stringResult = (String) sink.collectedTuples.get(1);
  // Fixed copy-pasted assertion message: this check is for the string tuple,
  // not the {"hello":"world"} map.
  Assert.assertEquals("Expects StringMessage as data", stringData, stringResult);
  inputOperator.deactivate();
  outputOperator.teardown();
  inputOperator.teardown();
  server.stop();
}
Aggregations