Use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar by apache.
From the class CSVParserTest, method TestParserWithoutHeader.
@Test
public void TestParserWithoutHeader() {
  CsvToMapParser parser = new CsvToMapParser();
  // Attach a test sink so the emitted tuples can be inspected and asserted on.
  CollectorTestSink<Object> sink = new CollectorTestSink<Object>();
  parser.output.setSink(sink);
  parser.setFieldDelimiter(',');
  parser.setLineDelimiter("\n");
  // Declare the schema explicitly, since the input carries no header row.
  ArrayList<CsvToMapParser.Field> fields = new ArrayList<CsvToMapParser.Field>();
  Field field1 = new Field();
  field1.setName("Eid");
  field1.setType("INTEGER");
  fields.add(field1);
  Field field2 = new Field();
  field2.setName("Name");
  field2.setType("STRING");
  fields.add(field2);
  Field field3 = new Field();
  field3.setName("Salary");
  field3.setType("LONG");
  fields.add(field3);
  parser.setFields(fields);
  parser.setHasHeader(false);
  parser.setup(null);
  // Three newline-delimited CSV records.
  String input = "123,xyz,567777\n321,abc,7777000\n456,pqr,5454545454";
  parser.input.process(input.getBytes());
  parser.teardown();
  Assert.assertEquals("Tuples read should be same ", 3, sink.collectedTuples.size());
  Assert.assertEquals("{Name=xyz, Salary=567777, Eid=123}", sink.collectedTuples.get(0).toString());
  Assert.assertEquals("{Name=abc, Salary=7777000, Eid=321}", sink.collectedTuples.get(1).toString());
  Assert.assertEquals("{Name=pqr, Salary=5454545454, Eid=456}", sink.collectedTuples.get(2).toString());
  sink.clear();
}
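The assertions above compare against the toString() of the emitted maps, which depends on HashMap iteration order. A more robust alternative is to look fields up by key. A minimal sketch, assuming java.util.Map is imported and that the parser boxes INTEGER fields as Integer and LONG fields as Long:

@SuppressWarnings("unchecked")
Map<String, Object> first = (Map<String, Object>) sink.collectedTuples.get(0);
Assert.assertEquals(123, first.get("Eid"));        // INTEGER field -> Integer
Assert.assertEquals("xyz", first.get("Name"));     // STRING field -> String
Assert.assertEquals(567777L, first.get("Salary")); // LONG field -> Long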
Use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar by apache.
From the class NegateIndexTest, method testSqlSelect.
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testSqlSelect() {
  // Create the operator and register a negate expression on column "b".
  SelectOperator oper = new SelectOperator();
  oper.addIndex(new NegateExpression("b", null));
  // Capture the operator's output.
  CollectorTestSink sink = new CollectorTestSink();
  oper.outport.setSink(sink);
  oper.setup(null);
  oper.beginWindow(1);
  HashMap<String, Object> tuple = new HashMap<String, Object>();
  tuple.put("a", 0);
  tuple.put("b", 1);
  tuple.put("c", 2);
  oper.inport.process(tuple);
  tuple = new HashMap<String, Object>();
  tuple.put("a", 1);
  tuple.put("b", 3);
  tuple.put("c", 4);
  oper.inport.process(tuple);
  tuple = new HashMap<String, Object>();
  tuple.put("a", 1);
  tuple.put("b", 5);
  tuple.put("c", 6);
  oper.inport.process(tuple);
  oper.endWindow();
  oper.teardown();
  // The test only logs the projected tuples; it makes no assertions.
  LOG.debug("{}", sink.collectedTuples);
}
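Since the test ends with a debug log rather than assertions, the safest check to add is a count assertion; the field naming of the projected tuples depends on how NegateExpression handles the null alias argument, so field values are not asserted here. A hedged sketch that could accompany the debug log:

// One projected tuple is expected per input tuple.
Assert.assertEquals("tuple count", 3, sink.collectedTuples.size());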
Use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar by apache.
From the class SelectMaxMinTest, method testSqlSelect.
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testSqlSelect() {
  // Create the operator and register a max/min aggregate over column "b".
  SelectFunctionOperator oper = new SelectFunctionOperator();
  oper.addSqlFunction(new MaxMinFunction("b", null, false));
  // Capture the operator's output.
  CollectorTestSink sink = new CollectorTestSink();
  oper.outport.setSink(sink);
  oper.setup(null);
  oper.beginWindow(1);
  HashMap<String, Object> tuple = new HashMap<String, Object>();
  tuple.put("a", 0);
  tuple.put("b", 1);
  tuple.put("c", 2);
  oper.inport.process(tuple);
  tuple = new HashMap<String, Object>();
  tuple.put("a", 1);
  tuple.put("b", 3);
  tuple.put("c", 4);
  oper.inport.process(tuple);
  tuple = new HashMap<String, Object>();
  tuple.put("a", 1);
  tuple.put("b", 5);
  tuple.put("c", 6);
  oper.inport.process(tuple);
  oper.endWindow();
  oper.teardown();
  // The test only logs the aggregate result; it makes no assertions.
  LOG.debug("{}", sink.collectedTuples);
}
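For this input, column "b" takes the values 1, 3, and 5 across the window, so the aggregate emitted at endWindow() is 5 if the boolean flag of MaxMinFunction selects the maximum and 1 if it selects the minimum. A hedged sketch of a count check, assuming the operator emits one aggregate tuple per window:

// An aggregate function should collapse the window into a single output tuple.
Assert.assertEquals("aggregate count", 1, sink.collectedTuples.size());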
Use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar by apache.
From the class JdbcPojoPollableOpeartorTest, method testRecovery.
@Test
public void testRecovery() throws IOException {
  int operatorId = 1;
  // Simulate a restart: the window data manager reports window 1 as the
  // largest completed window and supplies the state it saved for that window.
  when(windowDataManagerMock.getLargestCompletedWindow()).thenReturn(1L);
  when(windowDataManagerMock.retrieve(1)).thenReturn(WindowData.of(null, 0, 4));
  insertEvents(10, true, 0);
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);
  List<FieldInfo> fieldInfos = getFieldInfos();
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
  partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
  partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
  OperatorContext context = mockOperatorContext(operatorId, partitionAttributeMap);
  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  // Set manually because the partitioning logic is not invoked in this test.
  inputOperator.lastEmittedRow = 0;
  inputOperator.isPollerPartition = true;
  inputOperator.rangeQueryPair = new KeyValPair<>(0, 8);
  inputOperator.outputPort.setup(tpc);
  inputOperator.setScheduledExecutorService(mockscheduler);
  inputOperator.setup(context);
  inputOperator.setWindowManager(windowDataManagerMock);
  inputOperator.activate(context);
  CollectorTestSink<Object> sink = new CollectorTestSink<>();
  inputOperator.outputPort.setSink(sink);
  inputOperator.beginWindow(0);
  // While window 0 is replayed from recovered state, no poll task may be scheduled.
  verify(mockscheduler, times(0)).scheduleAtFixedRate(any(Runnable.class), anyLong(), anyLong(), any(TimeUnit.class));
  verify(mockscheduler, times(0)).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
  inputOperator.emitTuples();
  inputOperator.endWindow();
  inputOperator.beginWindow(1);
  // By window 1 the fixed-rate poll task has been scheduled exactly once.
  verify(mockscheduler, times(1)).scheduleAtFixedRate(any(Runnable.class), anyLong(), anyLong(), any(TimeUnit.class));
  verify(mockscheduler, times(0)).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
}
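The test depends on two Mockito mocks, windowDataManagerMock and mockscheduler, which are declared elsewhere in the test class. A sketch of plausible field declarations (a reconstruction, not the actual source):

// Hypothetical reconstruction of the mocked collaborators used above.
private final WindowDataManager windowDataManagerMock = Mockito.mock(WindowDataManager.class);
private final ScheduledExecutorService mockscheduler = Mockito.mock(ScheduledExecutorService.class);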
Use of org.apache.apex.malhar.lib.testbench.CollectorTestSink in project apex-malhar by apache.
From the class JdbcPojoPollableOpeartorTest, method testPollWithOffsetRebase.
@Test
public void testPollWithOffsetRebase() throws Exception {
  // Clear the table.
  insertEvents(0, true, 0);
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);
  List<FieldInfo> fieldInfos = getFieldInfos();
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setColumnsExpression("ID,STARTDATE,STARTTIME,STARTTIMESTAMP");
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(1);
  inputOperator.setRebaseOffset(true);
  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions =
      inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);
  int operatorId = 0;
  // Set up and activate each partitioned instance.
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    JdbcPOJOPollInputOperator partitionedOperator = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    partitionedOperator.outputPort.setup(tpc);
    partitionedOperator.setScheduledExecutorService(mockscheduler);
    partitionedOperator.setup(partitioningContext);
    partitionedOperator.activate(partitioningContext);
  }
  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();
  // The first partition serves the one-time range query; the last one polls for new rows.
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  int rows = 0;
  int windowId = 0;
  insertEvents(4, false, rows);
  rows += 4;
  JdbcPOJOPollInputOperator poller = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink = new CollectorTestSink<>();
  poller.outputPort.setSink(sink);
  poller.beginWindow(windowId++);
  poller.pollRecords();
  poller.emitTuples();
  Assert.assertEquals("emitted", rows, sink.collectedTuples.size());
  poller.endWindow();
  insertEvents(1, false, rows);
  rows += 1;
  poller.beginWindow(windowId++);
  // After the offset rebase, exactly the one newly inserted record is fetched.
  poller.pollRecords();
  poller.emitTuples();
  Assert.assertEquals("emitted", rows, sink.collectedTuples.size());
  poller.endWindow();
}
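The snippet activates each partition but never releases it. A hedged sketch of the symmetric cleanup, assuming the operator follows Apex's ActivationListener contract (deactivate() before teardown()):

// Hypothetical cleanup for the activated partitions.
for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> p : newPartitions) {
  JdbcPOJOPollInputOperator o = (JdbcPOJOPollInputOperator) p.getPartitionedInstance();
  o.deactivate();
  o.teardown();
}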