Use of org.apache.apex.malhar.lib.helper.TestPortContext in project apex-malhar by apache.
Class JdbcPojoOperatorTest, method testJdbcPojoInputOperator.
@Test
public void testJdbcPojoInputOperator() {
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);

  Attribute.AttributeMap.DefaultAttributeMap attributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

  insertEvents(10, true, 0);

  JdbcPOJOInputOperator inputOperator = new JdbcPOJOInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);

  List<FieldInfo> fieldInfos = Lists.newArrayList();
  fieldInfos.add(new FieldInfo("ID", "id", null));
  fieldInfos.add(new FieldInfo("STARTDATE", "startDate", null));
  fieldInfos.add(new FieldInfo("STARTTIME", "startTime", null));
  fieldInfos.add(new FieldInfo("STARTTIMESTAMP", "startTimestamp", null));
  fieldInfos.add(new FieldInfo("SCORE", "score", FieldInfo.SupportType.DOUBLE));
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(5);

  CollectorTestSink<Object> sink = new CollectorTestSink<>();
  inputOperator.outputPort.setSink(sink);

  // The output port needs the tuple class before setup so the operator can
  // bind the POJO fields to the DB columns.
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);

  inputOperator.setup(context);
  inputOperator.outputPort.setup(tpc);
  inputOperator.activate(context);

  // First window: fetchSize is 5, so only the first 5 of the 10 rows are emitted.
  inputOperator.beginWindow(0);
  inputOperator.emitTuples();
  inputOperator.endWindow();
  Assert.assertEquals("rows from db", 5, sink.collectedTuples.size());
  int i = 0;
  for (Object tuple : sink.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("i=" + i, pojoEvent.getId() == i);
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("time", pojoEvent.getStartTime() instanceof Time);
    Assert.assertTrue("timestamp", pojoEvent.getStartTimestamp() instanceof Timestamp);
    i++;
  }
  sink.collectedTuples.clear();

  // Second window: the remaining 5 rows.
  inputOperator.beginWindow(1);
  inputOperator.emitTuples();
  inputOperator.endWindow();
  Assert.assertEquals("rows from db", 5, sink.collectedTuples.size());
  for (Object tuple : sink.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("i=" + i, pojoEvent.getId() == i);
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("time", pojoEvent.getStartTime() instanceof Time);
    Assert.assertTrue("timestamp", pojoEvent.getStartTimestamp() instanceof Timestamp);
    Assert.assertTrue("score", pojoEvent.getScore() == 55.4);
    i++;
  }
  sink.collectedTuples.clear();

  // Third window: the table is exhausted, so nothing is emitted.
  inputOperator.beginWindow(2);
  inputOperator.emitTuples();
  inputOperator.endWindow();
  Assert.assertEquals("rows from db", 0, sink.collectedTuples.size());

  // Insert 3 more rows and check that they are read in the next window.
  insertEvents(3, false, 10);
  inputOperator.beginWindow(3);
  inputOperator.emitTuples();
  inputOperator.endWindow();
  Assert.assertEquals("rows from db", 3, sink.collectedTuples.size());
  for (Object tuple : sink.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("i=" + i, pojoEvent.getId() == i);
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("time", pojoEvent.getStartTime() instanceof Time);
    Assert.assertTrue("timestamp", pojoEvent.getStartTimestamp() instanceof Timestamp);
    Assert.assertTrue("score", pojoEvent.getScore() == 55.4);
    i++;
  }
}
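The test relies on an insertEvents(count, cleanExisting, startRowId) helper that this page does not show. Below is a minimal sketch of what it plausibly does, assuming a plain JDBC connection to URL and the column layout implied by the FieldInfo mappings above (the real table also carries a NAME column used by the output tests); the helper's actual implementation in JdbcPojoOperatorTest may differ:

// Hypothetical reconstruction; assumes java.sql.{Connection, DriverManager,
// PreparedStatement, Statement, Date, Time, Timestamp} are imported.
private void insertEvents(int numEvents, boolean cleanExisting, int startRowId) throws Exception {
  try (Connection con = DriverManager.getConnection(URL);
       Statement st = con.createStatement()) {
    if (cleanExisting) {
      // true on the first call of a test, so each test starts from an empty table
      st.execute("DELETE FROM " + TABLE_POJO_NAME);
    }
    try (PreparedStatement ps = con.prepareStatement(
        "INSERT INTO " + TABLE_POJO_NAME
        + " (ID, STARTDATE, STARTTIME, STARTTIMESTAMP, SCORE) VALUES (?, ?, ?, ?, ?)")) {
      long now = System.currentTimeMillis();
      for (int i = 0; i < numEvents; i++) {
        ps.setInt(1, startRowId + i);          // ids are sequential, matching the i == getId() asserts
        ps.setDate(2, new Date(now));
        ps.setTime(3, new Time(now));
        ps.setTimestamp(4, new Timestamp(now));
        ps.setDouble(5, 55.4);                 // matches the score assertion in the test
        ps.executeUpdate();
      }
    }
  }
}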
Use of org.apache.apex.malhar.lib.helper.TestPortContext in project apex-malhar by apache.
Class JdbcPojoOperatorTest, method testJdbcPojoInsertOutputOperator.
/**
 * This test assumes a direct mapping of POJO fields to DB columns; all
 * columns in the DB are present in the POJO.
 */
@Test
public void testJdbcPojoInsertOutputOperator() {
  JdbcTransactionalStore transactionalStore = new JdbcTransactionalStore();
  transactionalStore.setDatabaseDriver(DB_DRIVER);
  transactionalStore.setDatabaseUrl(URL);

  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

  TestPOJOOutputOperator outputOperator = new TestPOJOOutputOperator();
  outputOperator.setBatchSize(3);
  outputOperator.setTablename(TABLE_POJO_NAME);
  outputOperator.setStore(transactionalStore);
  outputOperator.setup(context);

  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  outputOperator.input.setup(tpc);

  CollectorTestSink<Object> errorSink = new CollectorTestSink<>();
  TestUtils.setSink(outputOperator.error, errorSink);
  outputOperator.activate(context);

  List<TestPOJOEvent> events = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    events.add(new TestPOJOEvent(i, "test" + i));
  }
  events.add(new TestPOJOEvent(0, "test0"));   // violates PK constraint
  events.add(new TestPOJOEvent(2, "test2"));   // violates PK constraint
  events.add(new TestPOJOEvent(10, "test10")); // clean record
  events.add(new TestPOJOEvent(11, "test11")); // clean record
  events.add(new TestPOJOEvent(3, "test3"));   // violates PK constraint
  events.add(new TestPOJOEvent(12, "test12")); // clean record

  outputOperator.beginWindow(0);
  for (TestPOJOEvent event : events) {
    outputOperator.input.process(event);
  }
  outputOperator.endWindow();

  // The 13 clean records (ids 0-12) reach the table; the 3 duplicates are
  // routed to the error port instead of aborting the whole batch.
  Assert.assertEquals("rows in db", 13, outputOperator.getNumOfEventsInStore(TABLE_POJO_NAME));
  Assert.assertEquals("Error tuples", 3, errorSink.collectedTuples.size());
}
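The PK violations above only make sense against a table whose ID column is the primary key, bound to a TestPOJOEvent with an (id, name) constructor. A hypothetical reconstruction of that POJO, inferred purely from the accessors these tests exercise (the real class in apex-malhar may carry more fields or differ in detail):

// Hypothetical reconstruction of the tuple class bound via TUPLE_CLASS;
// field names must match the POJO-side names in the FieldInfo mappings.
public class TestPOJOEvent {
  private int id;
  private String name;
  private java.sql.Date startDate;
  private java.sql.Time startTime;
  private java.sql.Timestamp startTimestamp;
  private double score;

  public TestPOJOEvent() {
    // POJO input operators need a no-arg constructor to instantiate tuples
  }

  public TestPOJOEvent(int id, String name) {
    this.id = id;
    this.name = name;
  }

  public int getId() { return id; }
  public void setId(int id) { this.id = id; }
  public String getName() { return name; }
  public void setName(String name) { this.name = name; }
  public java.sql.Date getStartDate() { return startDate; }
  public void setStartDate(java.sql.Date startDate) { this.startDate = startDate; }
  public java.sql.Time getStartTime() { return startTime; }
  public void setStartTime(java.sql.Time startTime) { this.startTime = startTime; }
  public java.sql.Timestamp getStartTimestamp() { return startTimestamp; }
  public void setStartTimestamp(java.sql.Timestamp startTimestamp) { this.startTimestamp = startTimestamp; }
  public double getScore() { return score; }
  public void setScore(double score) { this.score = score; }
}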
Use of org.apache.apex.malhar.lib.helper.TestPortContext in project apex-malhar by apache.
Class JdbcPojoOperatorTest, method testJdbcPojoInsertOutputOperatorExactlyOnce.
/**
 * This test assumes a direct mapping of POJO fields to DB columns; all
 * columns in the DB are present in the POJO. It verifies the exactly-once
 * guarantee across an operator restart.
 */
@Test
public void testJdbcPojoInsertOutputOperatorExactlyOnce() {
  JdbcTransactionalStore transactionalStore = new JdbcTransactionalStore();
  transactionalStore.setDatabaseDriver(DB_DRIVER);
  transactionalStore.setDatabaseUrl(URL);

  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);

  TestPOJOOutputOperator outputOperator = new TestPOJOOutputOperator();
  outputOperator.setBatchSize(3);
  outputOperator.setTablename(TABLE_POJO_NAME);
  outputOperator.setStore(transactionalStore);
  outputOperator.setup(context);

  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  outputOperator.input.setup(tpc);

  CollectorTestSink<Object> errorSink = new CollectorTestSink<>();
  TestUtils.setSink(outputOperator.error, errorSink);
  outputOperator.activate(context);

  List<TestPOJOEvent> events = Lists.newArrayList();
  for (int i = 0; i < 70; i++) {
    events.add(new TestPOJOEvent(i, "test" + i));
  }

  // First run: windows 0-2 commit events 0-29 (30 rows).
  outputOperator.beginWindow(0);
  for (int i = 0; i < 10; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();
  outputOperator.beginWindow(1);
  for (int i = 10; i < 20; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();
  outputOperator.beginWindow(2);
  for (int i = 20; i < 30; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();

  // Simulate failure and recovery: set up and activate the operator again,
  // then replay windows 0-2 with different payloads (events 30-59). Those
  // window ids are already committed in the store, so the replayed tuples
  // must be dropped.
  outputOperator.setup(context);
  outputOperator.input.setup(tpc);
  outputOperator.activate(context);

  outputOperator.beginWindow(0);
  for (int i = 30; i < 40; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();
  outputOperator.beginWindow(1);
  for (int i = 40; i < 50; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();
  outputOperator.beginWindow(2);
  for (int i = 50; i < 60; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();

  // Window 3 is new, so events 60-69 are written (10 more rows).
  outputOperator.beginWindow(3);
  for (int i = 60; i < 70; i++) {
    outputOperator.input.process(events.get(i));
  }
  outputOperator.endWindow();
  outputOperator.deactivate();
  outputOperator.teardown();

  // 30 rows from the first run plus 10 from window 3; the replayed windows
  // contribute nothing.
  Assert.assertEquals("rows in db", 40, outputOperator.getNumOfEventsInStore(TABLE_POJO_NAME));
}
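The 40-row result follows from window-based transaction bookkeeping: JdbcTransactionalStore persists the id of the last committed window alongside the data, and after a restart the operator consults it before writing (apex-malhar's TransactionableStore exposes this as getCommittedWindowId(appId, operatorId)). An illustrative sketch of that decision, not the operator's actual code:

// Illustrative sketch of exactly-once replay handling, assuming the store
// tracks the last committed window id per (appId, operatorId).
boolean shouldWrite(long currentWindowId, long committedWindowId) {
  // Windows with id <= committedWindowId are replays: their tuples were
  // persisted before the failure and are dropped (events 30-59 above).
  // Newer windows are staged and committed atomically together with their
  // window id at endWindow() (events 60-69), giving 30 + 10 = 40 rows.
  return currentWindowId > committedWindowId;
}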
Use of org.apache.apex.malhar.lib.helper.TestPortContext in project apex-malhar by apache.
Class JdbcPojoPollableOpeartorTest, method testDBPollerExtraField.
@Test
public void testDBPollerExtraField() throws Exception {
  insertEvents(10, true, 0);

  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);

  List<FieldInfo> fieldInfos = getFieldInfos();

  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);

  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setColumnsExpression("ID,STARTDATE,STARTTIME,STARTTIMESTAMP");
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(2);

  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions =
      inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);

  int operatorId = 0;
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    JdbcPOJOPollInputOperator pollOperator = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    pollOperator.outputPort.setup(tpc);
    pollOperator.setScheduledExecutorService(mockscheduler);
    pollOperator.setup(partitioningContext);
    pollOperator.activate(partitioningContext);
  }

  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();

  // The leading partitions serve bounded range queries; the last partition
  // polls for newly inserted rows.
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink1 = new CollectorTestSink<>();
  firstInstance.outputPort.setSink(sink1);
  firstInstance.beginWindow(0);
  Assert.assertFalse(firstInstance.ps.isClosed());
  firstInstance.pollRecords();
  // A range partition closes its prepared statement once its range is exhausted.
  Assert.assertTrue(firstInstance.ps.isClosed());
  firstInstance.emitTuples();
  firstInstance.endWindow();

  Assert.assertEquals("rows from db", 5, sink1.collectedTuples.size());
  for (Object tuple : sink1.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("id", pojoEvent.getId() < 5);
  }

  JdbcPOJOPollInputOperator secondInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink2 = new CollectorTestSink<>();
  secondInstance.outputPort.setSink(sink2);
  secondInstance.beginWindow(0);
  secondInstance.pollRecords();
  secondInstance.emitTuples();
  secondInstance.endWindow();

  Assert.assertEquals("rows from db", 5, sink2.collectedTuples.size());
  for (Object tuple : sink2.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("id", pojoEvent.getId() >= 5 && pojoEvent.getId() < 10);
  }

  // Rows inserted after partitioning are picked up by the polling partition.
  insertEvents(4, false, 10);
  JdbcPOJOPollInputOperator thirdInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
  thirdInstance.outputPort.setSink(sink3);
  thirdInstance.beginWindow(0);
  thirdInstance.pollRecords();
  thirdInstance.emitTuples();
  thirdInstance.endWindow();
  Assert.assertEquals("rows from db", 4, sink3.collectedTuples.size());
}
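Both poller tests share a getFieldInfos() helper that this page omits. A plausible shape for it, matching the columns listed in setColumnsExpression above (hypothetical; the real helper may map more columns or supply explicit types):

// Plausible reconstruction of the shared helper; a null SupportType lets
// the operator infer the field type from the database metadata.
private List<FieldInfo> getFieldInfos() {
  List<FieldInfo> fieldInfos = Lists.newArrayList();
  fieldInfos.add(new FieldInfo("ID", "id", null));
  fieldInfos.add(new FieldInfo("STARTDATE", "startDate", null));
  fieldInfos.add(new FieldInfo("STARTTIME", "startTime", null));
  fieldInfos.add(new FieldInfo("STARTTIMESTAMP", "startTimestamp", null));
  return fieldInfos;
}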
Use of org.apache.apex.malhar.lib.helper.TestPortContext in project apex-malhar by apache.
Class JdbcPojoPollableOpeartorTest, method testDBPoller.
@Test
public void testDBPoller() throws Exception {
  insertEvents(10, true, 0);

  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);

  List<FieldInfo> fieldInfos = getFieldInfos();

  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);

  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(2);

  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions =
      inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);

  int operatorId = 0;
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    JdbcPOJOPollInputOperator pollOperator = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    pollOperator.outputPort.setup(tpc);
    pollOperator.setScheduledExecutorService(mockscheduler);
    pollOperator.setup(partitioningContext);
    pollOperator.activate(partitioningContext);
  }

  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();

  // The leading partitions serve bounded range queries; the last partition
  // polls for newly inserted rows.
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink1 = new CollectorTestSink<>();
  firstInstance.outputPort.setSink(sink1);
  firstInstance.beginWindow(0);
  firstInstance.pollRecords();
  try {
    // A non-poller (range) partition closes its connection after the first
    // poll, so a second poll must fail.
    firstInstance.pollRecords();
    Assert.fail("expected closed connection");
  } catch (Exception e) {
    // expected
  }
  firstInstance.emitTuples();
  firstInstance.endWindow();

  Assert.assertEquals("rows from db", 5, sink1.collectedTuples.size());
  for (Object tuple : sink1.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("id", pojoEvent.getId() < 5);
  }

  JdbcPOJOPollInputOperator secondInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink2 = new CollectorTestSink<>();
  secondInstance.outputPort.setSink(sink2);
  secondInstance.beginWindow(0);
  secondInstance.pollRecords();
  secondInstance.emitTuples();
  secondInstance.endWindow();

  Assert.assertEquals("rows from db", 5, sink2.collectedTuples.size());
  for (Object tuple : sink2.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("id", pojoEvent.getId() >= 5 && pojoEvent.getId() < 10);
  }

  // Rows inserted after partitioning are picked up by the polling partition,
  // which, unlike a range partition, can poll repeatedly.
  insertEvents(4, false, 10);
  JdbcPOJOPollInputOperator thirdInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
  thirdInstance.outputPort.setSink(sink3);
  thirdInstance.beginWindow(0);
  thirdInstance.pollRecords();
  thirdInstance.pollRecords();
  thirdInstance.emitTuples();
  thirdInstance.endWindow();
  Assert.assertEquals("rows from db", 4, sink3.collectedTuples.size());
}
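All five tests use TestPortContext for the same purpose: handing a port its TUPLE_CLASS attribute outside a running DAG. A sketch of the minimum such a helper needs to provide, reconstructed from how the tests use it (the actual class in org.apache.apex.malhar.lib.helper may implement the Context.PortContext contract differently):

// Sketch of a minimal Context.PortContext backed by an attribute map, which
// is all that outputPort.setup(tpc) / input.setup(tpc) need in these tests.
// Assumes com.datatorrent.api.{Attribute, Context} and java.util.Collection.
public class TestPortContext implements Context.PortContext {
  private final Attribute.AttributeMap attributeMap;

  public TestPortContext(Attribute.AttributeMap attributeMap) {
    this.attributeMap = attributeMap;
  }

  @Override
  public Attribute.AttributeMap getAttributes() {
    return attributeMap;
  }

  @Override
  public <T> T getValue(Attribute<T> key) {
    // Fall back to the attribute's default when the map has no entry,
    // mirroring how contexts resolve attributes in a running DAG.
    T value = attributeMap.get(key);
    return value != null ? value : key.defaultValue;
  }

  @Override
  public void setCounters(Object counters) {
    // no-op in tests
  }

  @Override
  public void sendMetrics(Collection<String> metricNames) {
    // no-op in tests
  }
}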