Use of com.datatorrent.common.util.FSStorageAgent in project apex-core by apache.
In class StramRecoveryTest, method testRestartApp.
private void testRestartApp(StorageAgent agent, String appPath1) throws Exception {
  String appId1 = "app1";
  String appId2 = "app2";
  String appPath2 = testMeta.getPath() + "/" + appId2;
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId1);
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
  dag.setAttribute(LogicalPlan.APPLICATION_ATTEMPT_ID, 1);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, agent);
  dag.addOperator("o1", StatsListeningOperator.class);
  FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false));
  StreamingContainerManager.getInstance(recoveryHandler, dag, false);

  // test restore initial snapshot + log
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
  StreamingContainerManager scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  PhysicalPlan plan = scm.getPhysicalPlan();
  // original plan
  dag = plan.getLogicalPlan();
  Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
  PTOperator o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
  long[] ids = new FSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, new Configuration()).getWindowIds(o1p1.getId());
  Assert.assertArrayEquals(new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
  Assert.assertNull(o1p1.getContainer().getExternalId());

  // trigger journal write
  o1p1.getContainer().setExternalId("cid1");
  scm.writeJournal(o1p1.getContainer().getSetContainerState());

  /* simulate application restart from app1 */
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath2);
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId2);
  StramClient sc = new StramClient(new Configuration(), dag);
  try {
    sc.start();
    sc.copyInitialState(new Path(appPath1));
  } finally {
    sc.stop();
  }
  scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  plan = scm.getPhysicalPlan();
  dag = plan.getLogicalPlan();
  assertEquals("modified appId", appId2, dag.getValue(LogicalPlan.APPLICATION_ID));
  assertEquals("modified appPath", appPath2, dag.getValue(LogicalPlan.APPLICATION_PATH));
  Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
  o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
  assertEquals("journal copied", "cid1", o1p1.getContainer().getExternalId());

  CascadeStorageAgent csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
  Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
  Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
  Assert.assertEquals("parent storage agent is of same type", csa.getParentStorageAgent().getClass(), agent.getClass());
  /* parent and current point to the expected locations */
  Assert.assertEquals(true, ((FSStorageAgent) csa.getParentStorageAgent()).path.contains("app1"));
  Assert.assertEquals(true, ((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app2"));
  ids = csa.getWindowIds(o1p1.getId());
  Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);

  /* simulate another application restart from app2 */
  String appId3 = "app3";
  String appPath3 = testMeta.getPath() + "/" + appId3;
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath3);
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId3);
  sc = new StramClient(new Configuration(), dag);
  try {
    sc.start();
    // copy state from app2
    sc.copyInitialState(new Path(appPath2));
  } finally {
    sc.stop();
  }
  scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  plan = scm.getPhysicalPlan();
  dag = plan.getLogicalPlan();

  csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
  Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
  Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
  Assert.assertEquals("parent storage agent is itself a cascade", csa.getParentStorageAgent().getClass(), CascadeStorageAgent.class);
  CascadeStorageAgent parent = (CascadeStorageAgent) csa.getParentStorageAgent();
  Assert.assertEquals("parent's current storage agent is of same type", parent.getCurrentStorageAgent().getClass(), agent.getClass());
  Assert.assertEquals("parent's parent storage agent is of same type", parent.getParentStorageAgent().getClass(), agent.getClass());
  /* verify paths */
  Assert.assertEquals(true, ((FSStorageAgent) parent.getParentStorageAgent()).path.contains("app1"));
  Assert.assertEquals(true, ((FSStorageAgent) parent.getCurrentStorageAgent()).path.contains("app2"));
  Assert.assertEquals(true, ((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app3"));
  ids = csa.getWindowIds(o1p1.getId());
  Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
}
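The cascading lookup verified above can be sketched in isolation. A minimal, hypothetical snippet, assuming the CascadeStorageAgent(parent, current) constructor from apex-core and that getWindowIds consults the current agent before falling back to the parent, as the assertions suggest; operatorId is a placeholder:

// Hypothetical sketch: wrap the previous run's agent so old checkpoints stay readable.
StorageAgent previousRun = new FSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, null);
StorageAgent currentRun = new FSStorageAgent(appPath2 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, null);
CascadeStorageAgent cascade = new CascadeStorageAgent(previousRun, currentRun);
// Checkpoints written before the restart remain visible through the cascade.
long[] checkpoints = cascade.getWindowIds(operatorId); // operatorId is a placeholder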
Use of com.datatorrent.common.util.FSStorageAgent in project apex-core by apache.
In class StramRecoveryTest, method testWriteAheadLog.
@Test
public void testWriteAheadLog() throws Exception {
  final MutableInt flushCount = new MutableInt();
  final MutableBoolean isClosed = new MutableBoolean(false);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new FSStorageAgent(testMeta.getPath(), null));
  TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  Journal j = scm.getJournal();
  ByteArrayOutputStream bos = new ByteArrayOutputStream() {
    @Override
    public void flush() throws IOException {
      super.flush();
      flushCount.increment();
    }

    @Override
    public void close() throws IOException {
      super.close();
      isClosed.setValue(true);
    }
  };
  j.setOutputStream(new DataOutputStream(bos));

  PTOperator o1p1 = plan.getOperators(dag.getMeta(o1)).get(0);
  assertEquals(PTOperator.State.PENDING_DEPLOY, o1p1.getState());
  String externalId = new MockContainer(scm, o1p1.getContainer()).container.getExternalId();
  assertEquals("flush count", 1, flushCount.intValue());

  o1p1.setState(PTOperator.State.ACTIVE);
  assertEquals(PTOperator.State.ACTIVE, o1p1.getState());
  assertEquals("flush count", 2, flushCount.intValue());
  assertEquals("is closed", false, isClosed.booleanValue());

  // this will close the stream. There are 2 calls to flush() during the close() - one in Kryo Output and one
  // in FilterOutputStream
  j.setOutputStream(null);
  assertEquals("flush count", 4, flushCount.intValue());
  assertEquals("is closed", true, isClosed.booleanValue());

  // output stream is closed, so state will be changed without recording it in the journal
  o1p1.setState(PTOperator.State.INACTIVE);
  assertEquals(PTOperator.State.INACTIVE, o1p1.getState());
  assertEquals("flush count", 4, flushCount.intValue());

  ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
  j.replay(new DataInputStream(bis));
  assertEquals(PTOperator.State.ACTIVE, o1p1.getState());

  InetSocketAddress addr1 = InetSocketAddress.createUnresolved("host1", 1);
  PTContainer c1 = plan.getContainers().get(0);
  c1.setState(PTContainer.State.ALLOCATED);
  c1.host = "host1";
  c1.bufferServerAddress = addr1;
  c1.setAllocatedMemoryMB(2);
  c1.setRequiredMemoryMB(1);
  c1.setAllocatedVCores(3);
  c1.setRequiredVCores(4);

  j.setOutputStream(new DataOutputStream(bos));
  j.write(c1.getSetContainerState());

  // reset the container fields, then restore them from the journal below
  c1.setExternalId(null);
  c1.setState(PTContainer.State.NEW);
  c1.host = null;
  c1.bufferServerAddress = null;
  bis = new ByteArrayInputStream(bos.toByteArray());
  j.replay(new DataInputStream(bis));
  assertEquals(externalId, c1.getExternalId());
  assertEquals(PTContainer.State.ALLOCATED, c1.getState());
  assertEquals("host1", c1.host);
  assertEquals(addr1, c1.bufferServerAddress);
  assertEquals(1, c1.getRequiredMemoryMB());
  assertEquals(2, c1.getAllocatedMemoryMB());
  assertEquals(3, c1.getAllocatedVCores());
  assertEquals(4, c1.getRequiredVCores());

  j.write(scm.getSetOperatorProperty("o1", "maxTuples", "100"));
  o1.setMaxTuples(10);
  j.setOutputStream(null);
  bis = new ByteArrayInputStream(bos.toByteArray());
  j.replay(new DataInputStream(bis));
  assertEquals(100, o1.getMaxTuples());

  j.setOutputStream(new DataOutputStream(bos));
  scm.setOperatorProperty("o1", "maxTuples", "10");
  assertEquals(10, o1.getMaxTuples());
  o1.setMaxTuples(100);
  assertEquals(100, o1.getMaxTuples());
  j.setOutputStream(null);
  bis = new ByteArrayInputStream(bos.toByteArray());
  j.replay(new DataInputStream(bis));
  assertEquals(10, o1.getMaxTuples());

  j.setOutputStream(new DataOutputStream(bos));
  scm.setPhysicalOperatorProperty(o1p1.getId(), "maxTuples", "50");
}
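The journal pattern exercised above reduces to a small write/replay round trip. A minimal sketch under the same Journal API, reusing scm and c1 from the test above as placeholders:

// Record one container-state event, then replay it onto the plan's objects.
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
Journal journal = scm.getJournal();
journal.setOutputStream(new DataOutputStream(buffer)); // start recording
journal.write(c1.getSetContainerState());              // append one entry
journal.setOutputStream(null);                         // flush and close the stream
// replay applies the recorded mutations back onto the physical plan
journal.replay(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));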
Use of com.datatorrent.common.util.FSStorageAgent in project apex-core by apache.
In class StramRecoveryTest, method testRestartAppWithSyncAgent.
@Test
public void testRestartAppWithSyncAgent() throws Exception {
  final String appPath1 = testMeta.getPath() + "/app1";
  testRestartApp(new FSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, null), appPath1);
}
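A companion test using the asynchronous agent would differ only in the agent it constructs. A sketch, assuming apex-core's AsyncFSStorageAgent takes the same (path, configuration) arguments as FSStorageAgent:

@Test
public void testRestartAppWithAsyncAgent() throws Exception {
  final String appPath1 = testMeta.getPath() + "/app1";
  // same restart scenario, checkpointing through the async agent instead
  testRestartApp(new AsyncFSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, null), appPath1);
}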
Use of com.datatorrent.common.util.FSStorageAgent in project apex-core by apache.
In class StreamingContainerManagerTest, method testCheckpointWindowIds.
@Test
public void testCheckpointWindowIds() throws Exception {
  FSStorageAgent sa = new FSStorageAgent(testMeta.getPath(), null);
  long[] windowIds = new long[] { 123L, 345L, 234L };
  for (long windowId : windowIds) {
    sa.save(windowId, 1, windowId);
  }
  Arrays.sort(windowIds);
  long[] readWindowIds = sa.getWindowIds(1);
  Arrays.sort(readWindowIds);
  Assert.assertArrayEquals("Saved windowIds", windowIds, readWindowIds);
}
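For reference, the StorageAgent contract this test exercises is a save/list/load/delete round trip. An illustrative sketch under the same interface; the path and ids here are placeholders:

// Save state for one window, list it, load it back, then delete it.
FSStorageAgent agent = new FSStorageAgent("/tmp/checkpoints", null);
int operatorId = 1;
agent.save("operator-state", operatorId, 123L); // serialize state for window 123
long[] ids = agent.getWindowIds(operatorId);    // lists the saved window ids
Object restored = agent.load(operatorId, 123L); // deserialize it back
agent.delete(operatorId, 123L);                 // remove the checkpoint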
Use of com.datatorrent.common.util.FSStorageAgent in project apex-malhar by apache.
In class KafkaInputOperatorTest, method testKafkaInputOperator.
/**
 * Tests AbstractKafkaSinglePortInputOperator (i.e. an input adapter for
 * Kafka, aka consumer). This module receives data from an outside test
 * generator through the Kafka message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate messages and send them to the Kafka message bus] ==> [Receive
 * those messages through the Kafka input adapter (i.e. consumer) and emit
 * them via the emitTuples() interface on the output port during the
 * onMessage call]
 *
 * @throws Exception
 */
public void testKafkaInputOperator(int sleepTime, final int totalCount, KafkaConsumer consumer, boolean isValid, boolean idempotent) throws Exception {
  // initialize the latch for this test
  latch = new CountDownLatch(1);

  // Start producer
  KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC);
  p.setSendCount(totalCount);
  new Thread(p).start();

  // Create DAG for testing.
  LocalMode lma = LocalMode.newInstance();
  DAG dag = lma.getDAG();

  // Create KafkaSinglePortStringInputOperator
  KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer", KafkaSinglePortStringInputOperator.class);
  // isSuicide is a flag defined outside this method, on the enclosing test class
  if (isSuicide) {
    // make some extreme assumptions to make it fail if checkpointing wrong offsets
    dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent("target/ck", new Configuration()));
    node.setMaxTuplesPerWindow(500);
  }
  if (idempotent) {
    node.setWindowDataManager(new FSWindowDataManager());
  }
  consumer.setTopic(TEST_TOPIC);
  node.setConsumer(consumer);
  consumer.setCacheSize(5000);
  if (isValid) {
    node.setZookeeper("localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
  }

  // Create test tuple collector
  CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

  // Connect ports
  dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

  // Create local cluster
  final LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();

  // Wait up to 300s for the consumer to finish consuming all the messages
  Assert.assertTrue("TIMEOUT: 300s", latch.await(300000, TimeUnit.MILLISECONDS));

  // Check results
  Assert.assertTrue("Expected count >= " + totalCount + "; Actual count " + tupleCount.intValue(), totalCount <= tupleCount.intValue());
  logger.debug(String.format("Number of emitted tuples: %d", tupleCount.intValue()));

  p.close();
  lc.shutdown();
}
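The checkpointing attributes used in the isSuicide branch can be applied to any test DAG. In isolation, assuming a dag obtained from LocalMode as above:

// Checkpoint every streaming window and store operator state under target/ck.
dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new FSStorageAgent("target/ck", new Configuration()));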