Use of com.datatorrent.stram.plan.physical.PTOperator in project apex-core by apache.
In class StramRecoveryTest, method testRestartApp:
private void testRestartApp(StorageAgent agent, String appPath1) throws Exception {
  String appId1 = "app1";
  String appId2 = "app2";
  String appPath2 = testMeta.getPath() + "/" + appId2;
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId1);
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
  dag.setAttribute(LogicalPlan.APPLICATION_ATTEMPT_ID, 1);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, agent);
  dag.addOperator("o1", StatsListeningOperator.class);
  FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false));
  StreamingContainerManager.getInstance(recoveryHandler, dag, false);
  // test restore initial snapshot + log
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
  StreamingContainerManager scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  PhysicalPlan plan = scm.getPhysicalPlan();
  // original plan
  dag = plan.getLogicalPlan();
  Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
  PTOperator o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
  long[] ids = new FSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, new Configuration()).getWindowIds(o1p1.getId());
  Assert.assertArrayEquals(new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
  Assert.assertNull(o1p1.getContainer().getExternalId());
  // trigger journal write
  o1p1.getContainer().setExternalId("cid1");
  scm.writeJournal(o1p1.getContainer().getSetContainerState());
  /* simulate application restart from app1 */
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath2);
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId2);
  StramClient sc = new StramClient(new Configuration(), dag);
  try {
    sc.start();
    sc.copyInitialState(new Path(appPath1));
  } finally {
    sc.stop();
  }
  scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  plan = scm.getPhysicalPlan();
  dag = plan.getLogicalPlan();
  assertEquals("modified appId", appId2, dag.getValue(LogicalPlan.APPLICATION_ID));
  assertEquals("modified appPath", appPath2, dag.getValue(LogicalPlan.APPLICATION_PATH));
  Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
  o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
  assertEquals("journal copied", "cid1", o1p1.getContainer().getExternalId());
  CascadeStorageAgent csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
  Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
  Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
  Assert.assertEquals("parent storage agent is of same type ", csa.getParentStorageAgent().getClass(), agent.getClass());
  /* parent and current points to expected location */
  Assert.assertEquals(true, ((FSStorageAgent) csa.getParentStorageAgent()).path.contains("app1"));
  Assert.assertEquals(true, ((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app2"));
  ids = csa.getWindowIds(o1p1.getId());
  Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
  /* simulate another application restart from app2 */
  String appId3 = "app3";
  String appPath3 = testMeta.getPath() + "/" + appId3;
  dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath3);
  dag.setAttribute(LogicalPlan.APPLICATION_ID, appId3);
  sc = new StramClient(new Configuration(), dag);
  try {
    sc.start();
    // copy state from app2.
    sc.copyInitialState(new Path(appPath2));
  } finally {
    sc.stop();
  }
  scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
  plan = scm.getPhysicalPlan();
  dag = plan.getLogicalPlan();
  csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
  Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
  Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
Assert.assertEquals("parent storage agent is of same type ", csa.getParentStorageAgent().getClass(), CascadeStorageAgent.class);
  CascadeStorageAgent parent = (CascadeStorageAgent) csa.getParentStorageAgent();
  Assert.assertEquals("current storage agent is of same type ", parent.getCurrentStorageAgent().getClass(), agent.getClass());
Assert.assertEquals("parent storage agent is cascade ", parent.getParentStorageAgent().getClass(), agent.getClass());
  /* verify paths */
  Assert.assertEquals(true, ((FSStorageAgent) parent.getParentStorageAgent()).path.contains("app1"));
  Assert.assertEquals(true, ((FSStorageAgent) parent.getCurrentStorageAgent()).path.contains("app2"));
  Assert.assertEquals(true, ((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app3"));
  ids = csa.getWindowIds(o1p1.getId());
  Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
}
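The test above shows that after each restart the restored DAG carries a CascadeStorageAgent whose parent chain points back at the earlier application's checkpoints. As a minimal sketch of that lookup path, the hypothetical helper below (verifyCheckpointRestored is not part of the test class) resolves a PTOperator and reads its persisted window ids through whatever StorageAgent the restored plan carries; it assumes the same test-class context and imports, and uses only calls that appear in the test above.

private void verifyCheckpointRestored(PhysicalPlan plan, String operatorName) throws IOException
{
  LogicalPlan dag = plan.getLogicalPlan();
  PTOperator oper = plan.getOperators(dag.getOperatorMeta(operatorName)).get(0);
  // after a restart this attribute is expected to hold the cascading agent installed above
  StorageAgent agent = dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
  long[] ids = agent.getWindowIds(oper.getId());
  // the recovery checkpoint tracked on the PTOperator should be among the persisted window ids
  Assert.assertArrayEquals(new long[] { oper.getRecoveryCheckpoint().getWindowId() }, ids);
}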
Use of com.datatorrent.stram.plan.physical.PTOperator in project apex-core by apache.
In class StreamingContainerManagerTest, method testValidGenericOperatorDeployInfoType:
@Test
public void testValidGenericOperatorDeployInfoType() {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  TestGeneratorInputOperator.ValidGenericOperator o2 = dag.addOperator("o2", TestGeneratorInputOperator.ValidGenericOperator.class);
  dag.addStream("stream1", o1.outport1, o2.input);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  for (int i = 0; i < containers.size(); ++i) {
    assignContainer(scm, "container" + (i + 1));
  }
  OperatorMeta o2Meta = dag.getMeta(o2);
  PTOperator o2Physical = physicalPlan.getOperators(o2Meta).get(0);
  String containerId = o2Physical.getContainer().getExternalId();
  OperatorDeployInfo o1DeployInfo = getDeployInfo(scm.getContainerAgent(containerId)).get(0);
  Assert.assertEquals("type " + o1DeployInfo, OperatorDeployInfo.OperatorType.GENERIC, o1DeployInfo.type);
}
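For reference, the mapping used above from a logical operator to its hosting container can be captured in a small helper. The sketch below is hypothetical (findContainerId is not part of the test class); it assumes the same context, with com.datatorrent.api.Operator imported, and uses only calls shown in these tests.

private String findContainerId(StreamingContainerManager scm, Operator oper)
{
  PhysicalPlan plan = scm.getPhysicalPlan();
  OperatorMeta meta = scm.getLogicalPlan().getMeta(oper);  // logical metadata for the operator
  PTOperator physical = plan.getOperators(meta).get(0);    // first (here the only) physical partition
  return physical.getContainer().getExternalId();          // null until a container has been assigned
}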
Use of com.datatorrent.stram.plan.physical.PTOperator in project apex-core by apache.
In class StreamingContainerManagerTest, method testStaticPartitioning:
@Test
public void testStaticPartitioning() {
  //
  //           ,---> node2----,
  //           |              |
  //   node1---+---> node2----+---> unifier ---> node3
  //           |              |
  //           '---> node2----'
  //
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  PhysicalPlanTest.PartitioningTestOperator node2 = dag.addOperator("node2", PhysicalPlanTest.PartitioningTestOperator.class);
  node2.setPartitionCount(3);
  dag.setOperatorAttribute(node2, OperatorContext.SPIN_MILLIS, 10);
  /* this should not affect anything materially */
  dag.setOutputPortAttribute(node2.outport1, PortContext.QUEUE_CAPACITY, 1111);
  GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
  dag.setInputPortAttribute(node3.inport1, PortContext.QUEUE_CAPACITY, 2222);
  LogicalPlan.StreamMeta n1n2 = dag.addStream("n1n2", node1.outport1, node2.inport1);
  LogicalPlan.StreamMeta n2n3 = dag.addStream("n2n3", node2.outport1, node3.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
  MemoryStorageAgent msa = new MemoryStorageAgent();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, msa);
  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();
  Assert.assertEquals("number containers", 5, plan.getContainers().size());
  List<StreamingContainerAgent> containerAgents = Lists.newArrayList();
  for (int i = 0; i < plan.getContainers().size(); i++) {
    containerAgents.add(assignContainer(dnm, "container" + (i + 1)));
  }
  PTContainer c = plan.getOperators(dag.getMeta(node1)).get(0).getContainer();
  StreamingContainerAgent sca1 = dnm.getContainerAgent(c.getExternalId());
  List<OperatorDeployInfo> c1 = getDeployInfo(sca1);
  Assert.assertEquals("number operators assigned to container", 1, c1.size());
  Assert.assertTrue(dag.getMeta(node2) + " assigned to " + sca1.container.getExternalId(), containsNodeContext(c1, dag.getMeta(node1)));
  List<PTOperator> o2Partitions = plan.getOperators(dag.getMeta(node2));
  Assert.assertEquals("number partitions", TestStaticPartitioningSerDe.partitions.length, o2Partitions.size());
  for (int i = 0; i < o2Partitions.size(); i++) {
    String containerId = o2Partitions.get(i).getContainer().getExternalId();
    List<OperatorDeployInfo> cc = getDeployInfo(dnm.getContainerAgent(containerId));
    Assert.assertEquals("number operators assigned to container", 1, cc.size());
    Assert.assertTrue(dag.getMeta(node2) + " assigned to " + containerId, containsNodeContext(cc, dag.getMeta(node2)));
    // n1n2 in, mergeStream out
    OperatorDeployInfo ndi = cc.get(0);
    Assert.assertEquals("type " + ndi, OperatorDeployInfo.OperatorType.GENERIC, ndi.type);
    Assert.assertEquals("inputs " + ndi, 1, ndi.inputs.size());
    Assert.assertEquals("outputs " + ndi, 1, ndi.outputs.size());
    InputDeployInfo nidi = ndi.inputs.get(0);
    Assert.assertEquals("stream " + nidi, n1n2.getName(), nidi.declaredStreamId);
    Assert.assertEquals("partition for " + containerId, Sets.newHashSet(node2.partitionKeys[i]), nidi.partitionKeys);
    Assert.assertEquals("number stream codecs for " + nidi, 1, nidi.streamCodecs.size());
  }
  List<OperatorDeployInfo> cUnifier = getDeployInfo(dnm.getContainerAgent(plan.getOperators(dag.getMeta(node3)).get(0).getContainer().getExternalId()));
  Assert.assertEquals("number operators " + cUnifier, 2, cUnifier.size());
  OperatorDeployInfo mergeNodeDI = getNodeDeployInfo(cUnifier, dag.getMeta(node2).getMeta(node2.outport1).getUnifierMeta());
  Assert.assertNotNull("unifier for " + node2, mergeNodeDI);
  Assert.assertEquals("type " + mergeNodeDI, OperatorDeployInfo.OperatorType.UNIFIER, mergeNodeDI.type);
  Assert.assertEquals("inputs " + mergeNodeDI, 3, mergeNodeDI.inputs.size());
  List<Integer> sourceNodeIds = Lists.newArrayList();
  for (InputDeployInfo nidi : mergeNodeDI.inputs) {
    Assert.assertEquals("streamName " + nidi, n2n3.getName(), nidi.declaredStreamId);
    String mergePortName = "<merge#" + dag.getMeta(node2).getMeta(node2.outport1).getPortName() + ">";
    Assert.assertEquals("portName " + nidi, mergePortName, nidi.portName);
    Assert.assertNotNull("sourceNodeId " + nidi, nidi.sourceNodeId);
    Assert.assertNotNull("contextAttributes " + nidi, nidi.contextAttributes);
    Assert.assertEquals("contextAttributes ", new Integer(1111), nidi.getValue(PortContext.QUEUE_CAPACITY));
    sourceNodeIds.add(nidi.sourceNodeId);
  }
  for (PTOperator node : dnm.getPhysicalPlan().getOperators(dag.getMeta(node2))) {
    Assert.assertTrue(sourceNodeIds + " contains " + node.getId(), sourceNodeIds.contains(node.getId()));
  }
  Assert.assertEquals("outputs " + mergeNodeDI, 1, mergeNodeDI.outputs.size());
  for (OutputDeployInfo odi : mergeNodeDI.outputs) {
    Assert.assertNotNull("contextAttributes " + odi, odi.contextAttributes);
    Assert.assertEquals("contextAttributes ", new Integer(2222), odi.getValue(PortContext.QUEUE_CAPACITY));
  }
  try {
    Object operator = msa.load(mergeNodeDI.id, Stateless.WINDOW_ID);
    Assert.assertTrue("" + operator, operator instanceof DefaultUnifier);
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  // node3 container
  c = plan.getOperators(dag.getMeta(node3)).get(0).getContainer();
  List<OperatorDeployInfo> cmerge = getDeployInfo(dnm.getContainerAgent(c.getExternalId()));
  Assert.assertEquals("number operators " + cmerge, 2, cmerge.size());
  OperatorDeployInfo node3DI = getNodeDeployInfo(cmerge, dag.getMeta(node3));
  Assert.assertNotNull(dag.getMeta(node3) + " assigned", node3DI);
  Assert.assertEquals("inputs " + node3DI, 1, node3DI.inputs.size());
  InputDeployInfo node3In = node3DI.inputs.get(0);
  Assert.assertEquals("streamName " + node3In, n2n3.getName(), node3In.declaredStreamId);
  Assert.assertEquals("portName " + node3In, dag.getMeta(node3).getMeta(node3.inport1).getPortName(), node3In.portName);
  Assert.assertNotNull("sourceNodeId " + node3DI, node3In.sourceNodeId);
  Assert.assertEquals("sourcePortName " + node3DI, mergeNodeDI.outputs.get(0).portName, node3In.sourcePortName);
}
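The unifier check above relies on every node2 partition appearing among the sourceNodeId values of the merge inputs. A hypothetical helper like the one below (partitionIds is not part of the test class) collects those physical ids for such a comparison; it assumes the same test-class context and uses only calls that appear in the test.

private List<Integer> partitionIds(PhysicalPlan plan, OperatorMeta operatorMeta)
{
  List<Integer> ids = Lists.newArrayList();
  for (PTOperator partition : plan.getOperators(operatorMeta)) {
    ids.add(partition.getId());  // the physical id reported as sourceNodeId in the deploy info
  }
  return ids;
}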
Use of com.datatorrent.stram.plan.physical.PTOperator in project apex-core by apache.
In class StreamingContainerManagerTest, method testOperatorShutdown:
@Test
public void testOperatorShutdown() {
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.addStream("stream1", o1.outport1, o2.inport1);
  dag.addStream("stream2", o2.outport1, o3.inport1);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  Map<PTContainer, MockContainer> mockContainers = new HashMap<>();
  for (PTContainer c : physicalPlan.getContainers()) {
    MockContainer mc = new MockContainer(scm, c);
    mockContainers.put(c, mc);
  }
  // deploy all containers
  for (Map.Entry<PTContainer, MockContainer> ce : mockContainers.entrySet()) {
    ce.getValue().deploy();
  }
  for (Map.Entry<PTContainer, MockContainer> ce : mockContainers.entrySet()) {
    // skip buffer server purge in monitorHeartbeat
    ce.getKey().bufferServerAddress = null;
  }
  List<PTOperator> o1p = physicalPlan.getOperators(dag.getMeta(o1));
  Assert.assertEquals("o1 partitions", 1, o1p.size());
  PTOperator o1p1 = o1p.get(0);
  MockContainer mc1 = mockContainers.get(o1p1.getContainer());
  MockOperatorStats o1p1mos = mc1.stats(o1p1.getId());
  o1p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc1.sendHeartbeat();
  PTOperator o2p1 = physicalPlan.getOperators(dag.getMeta(o2)).get(0);
  MockContainer mc2 = mockContainers.get(o2p1.getContainer());
  MockOperatorStats o2p1mos = mc2.stats(o2p1.getId());
  o2p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc2.sendHeartbeat();
  Assert.assertEquals("o2 partitions", 2, physicalPlan.getOperators(dag.getMeta(o2)).size());
  PTOperator o2p2 = physicalPlan.getOperators(dag.getMeta(o2)).get(1);
  MockContainer mc3 = mockContainers.get(o2p2.getContainer());
  MockOperatorStats o2p2mos = mc3.stats(o2p2.getId());
  o2p2mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc3.sendHeartbeat();
  Assert.assertEquals("o3 partitions", 1, physicalPlan.getOperators(dag.getMeta(o3)).size());
  PTOperator o3p1 = physicalPlan.getOperators(dag.getMeta(o3)).get(0);
  MockContainer mc4 = mockContainers.get(o3p1.getContainer());
  MockOperatorStats o3p1mos = mc4.stats(o3p1.getId());
  MockOperatorStats unifierp1mos = mc4.stats(o3p1.upstreamMerge.values().iterator().next().getId());
  unifierp1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  o3p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc4.sendHeartbeat();
  o1p1mos.currentWindowId(2).deployState(DeployState.SHUTDOWN);
  mc1.sendHeartbeat();
  o1p = physicalPlan.getOperators(dag.getMeta(o1));
  Assert.assertEquals("o1 partitions", 1, o1p.size());
  Assert.assertEquals("o1p1 present", o1p1, o1p.get(0));
  Assert.assertEquals("input operator state", PTOperator.State.INACTIVE, o1p1.getState());
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", -1, scm.getCommittedWindowId());
  // committedWindowId updated in next cycle
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", 1, scm.getCommittedWindowId());
  scm.processEvents();
  Assert.assertEquals("containers at committedWindowId=1", 4, physicalPlan.getContainers().size());
  // checkpoint window 2
  o1p1mos.checkpointWindowId(2);
  mc1.sendHeartbeat();
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", 1, scm.getCommittedWindowId());
  o2p1mos.currentWindowId(2).checkpointWindowId(2);
  o2p2mos.currentWindowId(2).checkpointWindowId(2);
  o3p1mos.currentWindowId(2).checkpointWindowId(2);
  unifierp1mos.currentWindowId(2).checkpointWindowId(2);
  mc2.sendHeartbeat();
  mc3.sendHeartbeat();
  mc4.sendHeartbeat();
  scm.monitorHeartbeat(false);
  // Operators are shutdown when both operators reach window Id 2
  Assert.assertEquals(0, o1p1.getContainer().getOperators().size());
  Assert.assertEquals(0, o2p1.getContainer().getOperators().size());
  Assert.assertEquals(0, physicalPlan.getContainers().size());
}
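The shutdown sequence above checks the input operator's PTOperator.State after it reports SHUTDOWN and later verifies that its container no longer holds any operators. As a sketch of that kind of state check, the hypothetical helper below (allPartitionsInState is not part of the test class) reports whether every physical partition of a logical operator has reached a given state, using only accessors shown in the test.

private boolean allPartitionsInState(PhysicalPlan plan, OperatorMeta operatorMeta, PTOperator.State state)
{
  for (PTOperator partition : plan.getOperators(operatorMeta)) {
    if (partition.getState() != state) {
      return false;  // at least one partition has not reached the expected state yet
    }
  }
  return true;
}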
Use of com.datatorrent.stram.plan.physical.PTOperator in project apex-core by apache.
In class StreamCodecTest, method getSingleOperatorDeployInfo:
private OperatorDeployInfo getSingleOperatorDeployInfo(Operator oper, StreamingContainerManager scm) {
  LogicalPlan dag = scm.getLogicalPlan();
  String id = dag.getMeta(oper).toString();
  PhysicalPlan plan = scm.getPhysicalPlan();
  List<PTOperator> operators = plan.getOperators(dag.getMeta(oper));
  Assert.assertEquals("number of operators " + id, 1, operators.size());
  PTOperator operator = operators.get(0);
  return getOperatorDeployInfo(operator, id, scm);
}
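A hypothetical call site for the helper above, assuming a single-partition operator whose container has already been assigned; o1 stands for any operator added to the DAG under test, and the fields read (type, inputs) are the same ones accessed in the other tests on this page.

OperatorDeployInfo odi = getSingleOperatorDeployInfo(o1, scm);
Assert.assertEquals(OperatorDeployInfo.OperatorType.GENERIC, odi.type);
Assert.assertEquals(1, odi.inputs.size());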