Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
In the class JdbcNonTransactionalBatchOutputOperatorTest, the method testAtMostOnceFullBatch:
@Test
public void testAtMostOnceFullBatch() {
JdbcOperatorTest.cleanTable();
Random random = new Random();
TestOutputOperator outputOperator = createOperator(ProcessingMode.AT_MOST_ONCE);
outputOperator.beginWindow(0);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
outputOperator.endWindow();
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.beginWindow(1);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.getStore().disconnect();
// Simulate operator failure and redeployment: rebuild the operator context with the last committed window as the activation window and call setup() again.
com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_MOST_ONCE);
attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, 0L);
attributeMap.put(DAG.APPLICATION_ID, APP_ID);
OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
outputOperator.setup(context);
outputOperator.beginWindow(2);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
outputOperator.endWindow();
Assert.assertEquals("Commit window id ", 2, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 3 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
}
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
In the class JdbcNonTransactionalBatchOutputOperatorTest, the method createOperator:
private static TestOutputOperator createOperator(ProcessingMode processingMode) {
JdbcNonTransactionalStore store = new JdbcNonTransactionalStore();
store.setDatabaseDriver(JdbcNonTransactionalOutputOperatorTest.DB_DRIVER);
store.setDatabaseUrl(JdbcNonTransactionalOutputOperatorTest.URL);
TestOutputOperator outputOperator = new TestOutputOperator();
com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
attributeMap.put(OperatorContext.PROCESSING_MODE, processingMode);
attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, -1L);
attributeMap.put(DAG.APPLICATION_ID, APP_ID);
OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
outputOperator.setStore(store);
outputOperator.setBatchSize(BATCH_SIZE);
outputOperator.setup(context);
return outputOperator;
}
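The snippets above rely on static members of the test class that are not shown in this excerpt: APP_ID, OPERATOR_ID, BATCH_SIZE and the statically imported mockOperatorContext helper. A minimal sketch of that scaffolding, with illustrative values (the real constants and the exact package of OperatorContextTestHelper depend on the Malhar version):
// mockOperatorContext(operatorId, attributeMap) is the static factory on
// OperatorContextTestHelper that builds an OperatorContext for tests.
import static org.apache.apex.malhar.lib.helper.OperatorContextTestHelper.mockOperatorContext;
import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.DAG;
import com.datatorrent.api.Operator.ProcessingMode;
// Illustrative values only; the actual constants are defined in the test class.
public static final String APP_ID = "JdbcNonTransactionalBatchOutputOperatorTest";
public static final int OPERATOR_ID = 0;
public static final int BATCH_SIZE = 10;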
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
In the class JdbcNonTransactionalBatchOutputOperatorTest, the method testAtLeastOnceFullBatch:
@Test
public void testAtLeastOnceFullBatch() {
JdbcOperatorTest.cleanTable();
Random random = new Random();
TestOutputOperator outputOperator = createOperator(ProcessingMode.AT_LEAST_ONCE);
outputOperator.beginWindow(0);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
outputOperator.endWindow();
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.beginWindow(1);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.getStore().disconnect();
// Simulate operator failure and redeployment: rebuild the operator context with the last committed window as the activation window and call setup() again.
com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, 0L);
attributeMap.put(DAG.APPLICATION_ID, APP_ID);
OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
outputOperator.setup(context);
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.beginWindow(0);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
outputOperator.endWindow();
Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
outputOperator.beginWindow(1);
for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
outputOperator.input.put(new TestEvent(random.nextInt()));
}
outputOperator.endWindow();
Assert.assertEquals("Commit window id ", 1, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
Assert.assertEquals("Batch should be written", 3 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
}
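getNumOfEventsInStore is a verification helper on TestOutputOperator and is not part of this excerpt. A plausible sketch of what it does, assuming the events land in a single table whose name is held in a TABLE_NAME constant (hypothetical name):
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
public int getNumOfEventsInStore(Connection connection)
{
  // TABLE_NAME is hypothetical; the real test uses its own table constant.
  try (Statement statement = connection.createStatement();
      ResultSet resultSet = statement.executeQuery("select count(*) from " + TABLE_NAME)) {
    resultSet.next();
    return resultSet.getInt(1);
  } catch (SQLException e) {
    throw new RuntimeException("fetching event count failed", e);
  }
}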
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
In the class JdbcPojoPollableOpeartorTest, the method testRecovery:
@Test
public void testRecovery() throws IOException {
int operatorId = 1;
when(windowDataManagerMock.getLargestCompletedWindow()).thenReturn(1L);
when(windowDataManagerMock.retrieve(1)).thenReturn(WindowData.of(null, 0, 4));
insertEvents(10, true, 0);
JdbcStore store = new JdbcStore();
store.setDatabaseDriver(DB_DRIVER);
store.setDatabaseUrl(URL);
List<FieldInfo> fieldInfos = getFieldInfos();
Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
TestPortContext tpc = new TestPortContext(portAttributes);
Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
OperatorContext context = mockOperatorContext(operatorId, partitionAttributeMap);
JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
inputOperator.setStore(store);
inputOperator.setTableName(TABLE_POJO_NAME);
inputOperator.setKey("id");
inputOperator.setFieldInfos(fieldInfos);
inputOperator.setFetchSize(100);
inputOperator.setBatchSize(100);
// Set these fields directly because this test does not invoke the partitioning logic.
inputOperator.lastEmittedRow = 0;
inputOperator.isPollerPartition = true;
inputOperator.rangeQueryPair = new KeyValPair<>(0, 8);
inputOperator.outputPort.setup(tpc);
inputOperator.setScheduledExecutorService(mockscheduler);
inputOperator.setup(context);
inputOperator.setWindowManager(windowDataManagerMock);
inputOperator.activate(context);
CollectorTestSink<Object> sink = new CollectorTestSink<>();
inputOperator.outputPort.setSink(sink);
inputOperator.beginWindow(0);
verify(mockscheduler, times(0)).scheduleAtFixedRate(any(Runnable.class), anyLong(), anyLong(), any(TimeUnit.class));
verify(mockscheduler, times(0)).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
inputOperator.emitTuples();
inputOperator.endWindow();
inputOperator.beginWindow(1);
verify(mockscheduler, times(1)).scheduleAtFixedRate(any(Runnable.class), anyLong(), anyLong(), any(TimeUnit.class));
verify(mockscheduler, times(0)).schedule(any(Runnable.class), anyLong(), any(TimeUnit.class));
}
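The mocks used above (windowDataManagerMock and mockscheduler) are declared elsewhere in the test class. A minimal sketch of how they could be created with Mockito, assuming the field names match the snippet and the WindowDataManager interface from org.apache.apex.malhar.lib.wal:
import static org.mockito.Mockito.mock;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.apex.malhar.lib.wal.WindowDataManager;
// Plain Mockito mocks; the when(...)/verify(...) calls in the test configure and check them.
private final WindowDataManager windowDataManagerMock = mock(WindowDataManager.class);
private final ScheduledExecutorService mockscheduler = mock(ScheduledExecutorService.class);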
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
In the class JdbcPojoPollableOpeartorTest, the method testPollWithOffsetRebase:
@Test
public void testPollWithOffsetRebase() throws Exception {
// clear table
insertEvents(0, true, 0);
JdbcStore store = new JdbcStore();
store.setDatabaseDriver(DB_DRIVER);
store.setDatabaseUrl(URL);
List<FieldInfo> fieldInfos = getFieldInfos();
Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
TestPortContext tpc = new TestPortContext(portAttributes);
JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
inputOperator.setStore(store);
inputOperator.setTableName(TABLE_POJO_NAME);
inputOperator.setColumnsExpression("ID,STARTDATE,STARTTIME,STARTTIMESTAMP");
inputOperator.setKey("id");
inputOperator.setFieldInfos(fieldInfos);
inputOperator.setFetchSize(100);
inputOperator.setBatchSize(100);
inputOperator.setPartitionCount(1);
inputOperator.setRebaseOffset(true);
Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions = inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);
int operatorId = 0;
for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
JdbcPOJOPollInputOperator parition = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
parition.outputPort.setup(tpc);
parition.setScheduledExecutorService(mockscheduler);
parition.setup(partitioningContext);
parition.activate(partitioningContext);
}
Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();
// The first partition is for range queries, the last is for polling queries.
JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
int rows = 0;
int windowId = 0;
insertEvents(4, false, rows);
rows += 4;
JdbcPOJOPollInputOperator poller = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
poller.outputPort.setSink(sink3);
poller.beginWindow(windowId++);
poller.pollRecords();
poller.emitTuples();
Assert.assertEquals("emitted", rows, sink3.collectedTuples.size());
poller.endWindow();
insertEvents(1, false, rows);
rows += 1;
poller.beginWindow(windowId++);
// After the offset rebase, only the single newly inserted record should be fetched.
poller.pollRecords();
poller.emitTuples();
Assert.assertEquals("emitted", rows, sink3.collectedTuples.size());
poller.endWindow();
}
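insertEvents(numEvents, cleanExistingRows, startRowId) is another helper of the test class whose body is not included here. Judging from the call sites above, it optionally clears the POJO table and then inserts the requested number of rows with ascending ids. A rough, hypothetical reconstruction using plain JDBC (the column list is simplified; the real table also defines date/time columns):
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
private void insertEvents(int numEvents, boolean cleanExistingRows, int startRowId) throws Exception
{
  Class.forName(DB_DRIVER);
  try (Connection con = DriverManager.getConnection(URL);
      Statement statement = con.createStatement()) {
    if (cleanExistingRows) {
      statement.executeUpdate("delete from " + TABLE_POJO_NAME);
    }
    for (int i = 0; i < numEvents; i++) {
      // Only the ID column is populated in this sketch.
      statement.executeUpdate("insert into " + TABLE_POJO_NAME + " (ID) values (" + (startRowId + i) + ")");
    }
  }
}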