Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
From the class RabbitMQInputOperatorTest, method testRecoveryAndIdempotency.
@Test
public void testRecoveryAndIdempotency() throws Exception {
  RabbitMQInputOperator operator = new RabbitMQInputOperator();
  operator.setWindowDataManager(new FSWindowDataManager());
  operator.setHost("localhost");
  operator.setExchange("testEx");
  operator.setExchangeType("fanout");
  Attribute.AttributeMap attributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
  CollectorTestSink<Object> sink = new CollectorTestSink<Object>();
  operator.outputPort.setSink(sink);
  OperatorContext context = mockOperatorContext(1, attributeMap);
  operator.setup(context);
  operator.activate(context);
  final RabbitMQMessageGenerator publisher = new RabbitMQMessageGenerator();
  publisher.setup();
  publisher.generateMessages(5);
  // give RabbitMQ time to deliver all messages before the window starts
  Thread.sleep(10000);
  operator.beginWindow(1);
  operator.emitTuples();
  operator.endWindow();
  operator.deactivate();
  Assert.assertEquals("num of messages in window 1", 15, sink.collectedTuples.size());
  // simulate failure and then re-deployment of the operator
  sink.collectedTuples.clear();
  operator.setup(context);
  operator.activate(context);
  Assert.assertEquals("largest recovery window", 1, operator.getWindowDataManager().getLargestCompletedWindow());
  // window 1 is replayed from the WindowDataManager, not read from the broker again
  operator.beginWindow(1);
  operator.endWindow();
  Assert.assertEquals("num of messages in window 1", 15, sink.collectedTuples.size());
  sink.collectedTuples.clear();
  operator.deactivate();
  operator.teardown();
  // let the manager purge saved state up to the committed window
  operator.getWindowDataManager().committed(1);
  publisher.teardown();
}
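The test depends on the operator replaying window 1 from its WindowDataManager after redeployment, which is also why the replayed window emits tuples without any emitTuples() call. Below is a minimal sketch of that replay pattern; it uses getLargestCompletedWindow() and committed() exactly as the test does, while save() and retrieve() are the companion methods of the same API. The class and its three abstract helpers are hypothetical, and package names and signatures vary between Malhar releases (older releases keep WindowDataManager under com.datatorrent.lib.wal).

import java.io.IOException;

import org.apache.apex.malhar.lib.wal.FSWindowDataManager;
import org.apache.apex.malhar.lib.wal.WindowDataManager;

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.InputOperator;

// Sketch only: not the actual RabbitMQInputOperator implementation.
public abstract class IdempotentInputSketch implements InputOperator {
  private final WindowDataManager windowDataManager = new FSWindowDataManager();
  private transient long currentWindowId;
  private transient boolean replaying;

  @Override
  public void setup(OperatorContext context) {
    windowDataManager.setup(context);
  }

  @Override
  public void beginWindow(long windowId) {
    currentWindowId = windowId;
    // Windows at or below the largest completed window were fully processed
    // before the failure; re-emit their saved tuples instead of consuming
    // from the source again, so the replayed window is identical.
    replaying = windowId <= windowDataManager.getLargestCompletedWindow();
    if (replaying) {
      try {
        replay(windowDataManager.retrieve(windowId)); // hypothetical helper
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }

  @Override
  public void emitTuples() {
    if (!replaying) {
      consumeFromSource(); // hypothetical helper: read and emit new tuples
    }
  }

  @Override
  public void endWindow() {
    if (!replaying) {
      try {
        // Persist what this window emitted so a redeployed instance can
        // replay it; this is what advances getLargestCompletedWindow().
        windowDataManager.save(stateOfCurrentWindow(), currentWindowId);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }

  @Override
  public void teardown() {
    windowDataManager.teardown();
  }

  protected abstract void replay(Object savedState);

  protected abstract void consumeFromSource();

  protected abstract Object stateOfCurrentWindow();
}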
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
From the class RedisInputOperatorTest, method testRecoveryAndIdempotency.
@Test
public void testRecoveryAndIdempotency() throws Exception {
  this.operatorStore = new RedisStore();
  this.testStore = new RedisStore();
  testStore.connect();
  // seed the keyspace that the operator will scan
  testStore.put("test_abc", "789");
  testStore.put("test_def", "456");
  testStore.put("test_ghi", "123");
  RedisKeyValueInputOperator operator = new RedisKeyValueInputOperator();
  operator.setWindowDataManager(new FSWindowDataManager());
  operator.setStore(operatorStore);
  operator.setScanCount(1);
  Attribute.AttributeMap attributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
  CollectorTestSink<Object> sink = new CollectorTestSink<Object>();
  operator.outputPort.setSink(sink);
  OperatorContext context = mockOperatorContext(1, attributeMap);
  try {
    operator.setup(context);
    operator.beginWindow(1);
    operator.emitTuples();
    operator.endWindow();
    int numberOfMessagesInWindow1 = sink.collectedTuples.size();
    sink.collectedTuples.clear();
    operator.beginWindow(2);
    operator.emitTuples();
    operator.endWindow();
    int numberOfMessagesInWindow2 = sink.collectedTuples.size();
    sink.collectedTuples.clear();
    // simulate failure and then re-deployment of the operator;
    // re-instantiating resets the operator's in-memory state
    operator = new RedisKeyValueInputOperator();
    operator.setWindowDataManager(new FSWindowDataManager());
    operator.setStore(operatorStore);
    operator.setScanCount(1);
    operator.outputPort.setSink(sink);
    operator.setup(context);
    Assert.assertEquals("largest recovery window", 2, operator.getWindowDataManager().getLargestCompletedWindow());
    // replayed windows ignore extra emitTuples() calls: the second call
    // below must not produce additional tuples
    operator.beginWindow(1);
    operator.emitTuples();
    operator.emitTuples();
    operator.endWindow();
    Assert.assertEquals("num of messages in window 1", numberOfMessagesInWindow1, sink.collectedTuples.size());
    sink.collectedTuples.clear();
    operator.beginWindow(2);
    operator.emitTuples();
    operator.endWindow();
    Assert.assertEquals("num of messages in window 2", numberOfMessagesInWindow2, sink.collectedTuples.size());
  } finally {
    // remove the test keys that were scanned into the sink
    for (Object e : sink.collectedTuples) {
      KeyValPair<String, String> entry = (KeyValPair<String, String>) e;
      testStore.remove(entry.getKey());
    }
    sink.collectedTuples.clear();
    // let the manager purge saved state up to the committed window
    operator.getWindowDataManager().committed(5);
    operator.teardown();
  }
}
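Both stores above rely on RedisStore's defaults, i.e. a Redis server on localhost. A short sketch of pointing them at another server before connecting; setHost and setPort are assumed setters and the host name is a placeholder:

// Sketch: configuring the stores for a non-default Redis server.
// The setters are assumptions; the test relies on localhost defaults.
RedisStore operatorStore = new RedisStore();
operatorStore.setHost("redis.example.com"); // placeholder host
operatorStore.setPort(6379);

RedisStore testStore = new RedisStore();
testStore.setHost("redis.example.com");
testStore.setPort(6379);
testStore.connect();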
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
From the class JdbcNonTransactionalBatchOutputOperatorTest, method testAtMostOnceHalfBatch.
@Test
public void testAtMostOnceHalfBatch() {
  JdbcOperatorTest.cleanTable();
  Random random = new Random();
  TestOutputOperator outputOperator = createOperator(ProcessingMode.AT_MOST_ONCE);
  // window 0: a full batch, flushed and committed at endWindow
  outputOperator.beginWindow(0);
  for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  outputOperator.endWindow();
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // window 1: only half a batch, which stays buffered in memory
  outputOperator.beginWindow(1);
  for (int batchCounter = 0; batchCounter < HALF_BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // simulate a failure before window 1 completes
  outputOperator.getStore().disconnect();
  // redeploy the operator, activating it at window 0
  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_MOST_ONCE);
  attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, 0L);
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
  outputOperator.setup(context);
  // at-most-once does not replay: the half batch from window 1 is dropped
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // window 2: processing resumes with a full batch
  outputOperator.beginWindow(2);
  for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  outputOperator.endWindow();
  Assert.assertEquals("Commit window id ", 2, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
}
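A hedged sketch of the recovery decision behind the numbers asserted above; the helper and its names are illustrative, not the actual Malhar operator code:

import com.datatorrent.api.Operator.ProcessingMode;

// Illustrative helper (not Malhar API): decide whether a window delivered
// after redeployment should be written to the database.
final class WindowRecoveryDecision {
  static boolean shouldWrite(long windowId, long committedWindowId, ProcessingMode mode) {
    if (mode == ProcessingMode.AT_MOST_ONCE) {
      // Nothing before the activation point is replayed, so every window
      // the operator sees is new; uncommitted data from before the failure
      // (the half batch above) is simply lost.
      return true;
    }
    // AT_LEAST_ONCE replays windows from the last checkpoint; skipping
    // those at or below the committed id avoids writing duplicate rows.
    return windowId > committedWindowId;
  }
}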
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
From the class JdbcNonTransactionalBatchOutputOperatorTest, method testAtLeastOnceHalfBatch.
@Test
public void testAtLeastOnceHalfBatch() {
  JdbcOperatorTest.cleanTable();
  Random random = new Random();
  TestOutputOperator outputOperator = createOperator(ProcessingMode.AT_LEAST_ONCE);
  // window 0: a full batch, flushed and committed at endWindow
  outputOperator.beginWindow(0);
  for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  outputOperator.endWindow();
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // window 1: only half a batch, buffered but not yet written
  outputOperator.beginWindow(1);
  for (int batchCounter = 0; batchCounter < HALF_BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // simulate a failure before window 1 completes
  outputOperator.getStore().disconnect();
  // redeploy the operator, activating it at window 0
  com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap attributeMap = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
  attributeMap.put(OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
  attributeMap.put(OperatorContext.ACTIVATION_WINDOW_ID, 0L);
  attributeMap.put(DAG.APPLICATION_ID, APP_ID);
  OperatorContext context = mockOperatorContext(OPERATOR_ID, attributeMap);
  outputOperator.setup(context);
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // window 0 is replayed, but it is at or below the committed window id,
  // so its tuples are dropped rather than written twice
  outputOperator.beginWindow(0);
  for (int batchCounter = 0; batchCounter < BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  outputOperator.endWindow();
  Assert.assertEquals("Commit window id ", 0, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
  // window 1 is replayed and completes this time; together with the half
  // batch still buffered from before the failure, the batch fills up and
  // is written, bringing the table to two full batches
  outputOperator.beginWindow(1);
  for (int batchCounter = 0; batchCounter < HALF_BATCH_SIZE; batchCounter++) {
    outputOperator.input.put(new TestEvent(random.nextInt()));
  }
  outputOperator.endWindow();
  Assert.assertEquals("Commit window id ", 1, outputOperator.getStore().getCommittedWindowId(APP_ID, OPERATOR_ID));
  Assert.assertEquals("Batch should be written", 2 * BATCH_SIZE, outputOperator.getNumOfEventsInStore(outputOperator.getStore().connection));
}
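Outside of tests, the processing mode is normally requested as a DAG attribute rather than through a mocked OperatorContext. A sketch, assuming the standard dag.setAttribute overload for operator attributes; the application class and operator name are placeholders, and TestOutputOperator stands in for any JDBC output operator:

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.DAG;
import com.datatorrent.api.Operator.ProcessingMode;
import com.datatorrent.api.StreamingApplication;

// Sketch: requesting at-least-once for an operator when wiring a DAG.
public class AtLeastOnceApp implements StreamingApplication {
  @Override
  public void populateDAG(DAG dag, Configuration conf) {
    TestOutputOperator jdbcOut = dag.addOperator("jdbcOut", new TestOutputOperator());
    dag.setAttribute(jdbcOut, OperatorContext.PROCESSING_MODE, ProcessingMode.AT_LEAST_ONCE);
  }
}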
Use of com.datatorrent.api.Context.OperatorContext in project apex-malhar by apache.
From the class JdbcPojoPollableOpeartorTest, method testDBPoller.
@Test
public void testDBPoller() throws Exception {
  insertEvents(10, true, 0);
  JdbcStore store = new JdbcStore();
  store.setDatabaseDriver(DB_DRIVER);
  store.setDatabaseUrl(URL);
  List<FieldInfo> fieldInfos = getFieldInfos();
  Attribute.AttributeMap.DefaultAttributeMap portAttributes = new Attribute.AttributeMap.DefaultAttributeMap();
  portAttributes.put(Context.PortContext.TUPLE_CLASS, TestPOJOEvent.class);
  TestPortContext tpc = new TestPortContext(portAttributes);
  JdbcPOJOPollInputOperator inputOperator = new JdbcPOJOPollInputOperator();
  inputOperator.setStore(store);
  inputOperator.setTableName(TABLE_POJO_NAME);
  inputOperator.setKey("id");
  inputOperator.setFieldInfos(fieldInfos);
  inputOperator.setFetchSize(100);
  inputOperator.setBatchSize(100);
  inputOperator.setPartitionCount(2);
  Collection<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> newPartitions = inputOperator.definePartitions(new ArrayList<Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>>(), null);
  int operatorId = 0;
  for (com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>> partition : newPartitions) {
    Attribute.AttributeMap.DefaultAttributeMap partitionAttributeMap = new Attribute.AttributeMap.DefaultAttributeMap();
    partitionAttributeMap.put(DAG.APPLICATION_ID, APP_ID);
    partitionAttributeMap.put(Context.DAGContext.APPLICATION_PATH, dir);
    OperatorContext partitioningContext = mockOperatorContext(operatorId++, partitionAttributeMap);
    JdbcPOJOPollInputOperator partitionedOperator = (JdbcPOJOPollInputOperator) partition.getPartitionedInstance();
    partitionedOperator.outputPort.setup(tpc);
    partitionedOperator.setScheduledExecutorService(mockscheduler);
    partitionedOperator.setup(partitioningContext);
    partitionedOperator.activate(partitioningContext);
  }
  Iterator<com.datatorrent.api.Partitioner.Partition<AbstractJdbcPollInputOperator<Object>>> itr = newPartitions.iterator();
  // the first partitions serve fixed key ranges; the last one polls for new rows
  JdbcPOJOPollInputOperator firstInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink1 = new CollectorTestSink<>();
  firstInstance.outputPort.setSink(sink1);
  firstInstance.beginWindow(0);
  firstInstance.pollRecords();
  try {
    // a range (non-poller) partition closes its connection after one scan,
    // so a second pollRecords() must fail
    firstInstance.pollRecords();
    Assert.fail("expected closed connection");
  } catch (Exception e) {
    // expected
  }
  firstInstance.emitTuples();
  firstInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink1.collectedTuples.size());
  for (Object tuple : sink1.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("date", pojoEvent.getStartDate() instanceof Date);
    Assert.assertTrue("id in first range", pojoEvent.getId() < 5);
  }
  JdbcPOJOPollInputOperator secondInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink2 = new CollectorTestSink<>();
  secondInstance.outputPort.setSink(sink2);
  secondInstance.beginWindow(0);
  secondInstance.pollRecords();
  secondInstance.emitTuples();
  secondInstance.endWindow();
  Assert.assertEquals("rows from db", 5, sink2.collectedTuples.size());
  for (Object tuple : sink2.collectedTuples) {
    TestPOJOEvent pojoEvent = (TestPOJOEvent) tuple;
    Assert.assertTrue("id in second range", pojoEvent.getId() < 10 && pojoEvent.getId() >= 5);
  }
  // rows inserted after partitioning are picked up only by the poller partition
  insertEvents(4, false, 10);
  JdbcPOJOPollInputOperator thirdInstance = (JdbcPOJOPollInputOperator) itr.next().getPartitionedInstance();
  CollectorTestSink<Object> sink3 = new CollectorTestSink<>();
  thirdInstance.outputPort.setSink(sink3);
  thirdInstance.beginWindow(0);
  thirdInstance.pollRecords();
  thirdInstance.pollRecords();
  thirdInstance.emitTuples();
  thirdInstance.endWindow();
  Assert.assertEquals("rows from db", 4, sink3.collectedTuples.size());
}
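The three partitions come from a key-range split: setPartitionCount(2) yields two static ranges over the 10 existing rows plus one open-ended poller partition for rows inserted later. A small illustrative helper (not the Malhar implementation) that reproduces the split the assertions above rely on:

// Illustrative split: N static ranges over the existing keys plus one
// open-ended range for the poller partition. Not Malhar code; assumes the
// row count divides evenly by the partition count, as in the test.
final class RangeSplitSketch {
  // Returns {lower, upper} bounds per partition; upper is exclusive.
  static long[][] splitRanges(long existingRows, int partitionCount) {
    long[][] ranges = new long[partitionCount + 1][2];
    long size = existingRows / partitionCount;
    for (int i = 0; i < partitionCount; i++) {
      ranges[i][0] = i * size;       // e.g. partition 0 -> [0, 5)
      ranges[i][1] = (i + 1) * size; // e.g. partition 1 -> [5, 10)
    }
    ranges[partitionCount][0] = existingRows;   // poller partition picks up
    ranges[partitionCount][1] = Long.MAX_VALUE; // anything inserted later
    return ranges;
  }
}

With 10 rows and a partition count of 2, this gives [0, 5), [5, 10), and [10, MAX), matching sink1 (ids below 5), sink2 (ids 5 through 9), and sink3 (the 4 rows inserted starting at id 10).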