Use of com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent in project apex-core by apache.
From class StreamingContainerManagerTest, method testValidGenericOperatorDeployInfoType:
@Test
public void testValidGenericOperatorDeployInfoType() {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  TestGeneratorInputOperator.ValidGenericOperator o2 = dag.addOperator("o2", TestGeneratorInputOperator.ValidGenericOperator.class);
  dag.addStream("stream1", o1.outport1, o2.input);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  List<PTContainer> containers = physicalPlan.getContainers();
  for (int i = 0; i < containers.size(); ++i) {
    assignContainer(scm, "container" + (i + 1));
  }
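  // locate the physical partition of o2 and check how its deploy info is typed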
  OperatorMeta o2Meta = dag.getMeta(o2);
  PTOperator o2Physical = physicalPlan.getOperators(o2Meta).get(0);
  String containerId = o2Physical.getContainer().getExternalId();
  OperatorDeployInfo o1DeployInfo = getDeployInfo(scm.getContainerAgent(containerId)).get(0);
  Assert.assertEquals("type " + o1DeployInfo, OperatorDeployInfo.OperatorType.GENERIC, o1DeployInfo.type);
}
Use of com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent in project apex-core by apache.
From class StreamingContainerManagerTest, method testRecoveryOrder:
@Test
public void testRecoveryOrder() throws Exception {
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
  GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
  dag.addStream("n1n2", node1.outport1, node2.inport1);
  dag.addStream("n2n3", node2.outport1, node3.inport1);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  Assert.assertEquals("" + scm.containerStartRequests, 2, scm.containerStartRequests.size());
  scm.containerStartRequests.clear();
  PhysicalPlan plan = scm.getPhysicalPlan();
  List<PTContainer> containers = plan.getContainers();
  Assert.assertEquals("" + containers, 2, plan.getContainers().size());
  PTContainer c1 = containers.get(0);
  Assert.assertEquals("c1.operators " + c1.getOperators(), 2, c1.getOperators().size());
  PTContainer c2 = containers.get(1);
  Assert.assertEquals("c2.operators " + c2.getOperators(), 1, c2.getOperators().size());
  assignContainer(scm, "container1");
  assignContainer(scm, "container2");
  StreamingContainerAgent sca1 = scm.getContainerAgent(c1.getExternalId());
  StreamingContainerAgent sca2 = scm.getContainerAgent(c2.getExternalId());
  Assert.assertEquals("", 0, countState(sca1.container, PTOperator.State.PENDING_UNDEPLOY));
  Assert.assertEquals("", 2, countState(sca1.container, PTOperator.State.PENDING_DEPLOY));
  scm.scheduleContainerRestart(c1.getExternalId());
  Assert.assertEquals("", 0, countState(sca1.container, PTOperator.State.PENDING_UNDEPLOY));
  Assert.assertEquals("", 2, countState(sca1.container, PTOperator.State.PENDING_DEPLOY));
  Assert.assertEquals("" + scm.containerStartRequests, 1, scm.containerStartRequests.size());
  ContainerStartRequest dr = scm.containerStartRequests.peek();
  Assert.assertNotNull(dr);
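  // the downstream operator in container2 is undeployed first so it can later reconnect to the restarted upstream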
Assert.assertEquals("" + sca2.container, 1, countState(sca2.container, PTOperator.State.PENDING_UNDEPLOY));
Assert.assertEquals("" + sca2.container, 0, countState(sca2.container, PTOperator.State.PENDING_DEPLOY));
}
Use of com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent in project apex-core by apache.
From class StreamingContainerManagerTest, method testOperatorShutdown:
@Test
public void testOperatorShutdown() {
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.addStream("stream1", o1.outport1, o2.inport1);
  dag.addStream("stream2", o2.outport1, o3.inport1);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan physicalPlan = scm.getPhysicalPlan();
  Map<PTContainer, MockContainer> mockContainers = new HashMap<>();
  for (PTContainer c : physicalPlan.getContainers()) {
    MockContainer mc = new MockContainer(scm, c);
    mockContainers.put(c, mc);
  }
  // deploy all containers
  for (Map.Entry<PTContainer, MockContainer> ce : mockContainers.entrySet()) {
    ce.getValue().deploy();
  }
  for (Map.Entry<PTContainer, MockContainer> ce : mockContainers.entrySet()) {
    // skip buffer server purge in monitorHeartbeat
    ce.getKey().bufferServerAddress = null;
  }
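  // report each physical operator (including the unifier) as ACTIVE with window 1 processed and checkpointed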
  List<PTOperator> o1p = physicalPlan.getOperators(dag.getMeta(o1));
  Assert.assertEquals("o1 partitions", 1, o1p.size());
  PTOperator o1p1 = o1p.get(0);
  MockContainer mc1 = mockContainers.get(o1p1.getContainer());
  MockOperatorStats o1p1mos = mc1.stats(o1p1.getId());
  o1p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc1.sendHeartbeat();
  PTOperator o2p1 = physicalPlan.getOperators(dag.getMeta(o2)).get(0);
  MockContainer mc2 = mockContainers.get(o2p1.getContainer());
  MockOperatorStats o2p1mos = mc2.stats(o2p1.getId());
  o2p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc2.sendHeartbeat();
  Assert.assertEquals("o2 partitions", 2, physicalPlan.getOperators(dag.getMeta(o2)).size());
  PTOperator o2p2 = physicalPlan.getOperators(dag.getMeta(o2)).get(1);
  MockContainer mc3 = mockContainers.get(o2p2.getContainer());
  MockOperatorStats o2p2mos = mc3.stats(o2p2.getId());
  o2p2mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc3.sendHeartbeat();
  Assert.assertEquals("o3 partitions", 1, physicalPlan.getOperators(dag.getMeta(o3)).size());
  PTOperator o3p1 = physicalPlan.getOperators(dag.getMeta(o3)).get(0);
  MockContainer mc4 = mockContainers.get(o3p1.getContainer());
  MockOperatorStats o3p1mos = mc4.stats(o3p1.getId());
  MockOperatorStats unifierp1mos = mc4.stats(o3p1.upstreamMerge.values().iterator().next().getId());
  unifierp1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  o3p1mos.currentWindowId(1).checkpointWindowId(1).deployState(DeployState.ACTIVE);
  mc4.sendHeartbeat();
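  // o1 reports SHUTDOWN at window 2; it becomes INACTIVE but its removal waits until all downstream operators pass that window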
  o1p1mos.currentWindowId(2).deployState(DeployState.SHUTDOWN);
  mc1.sendHeartbeat();
  o1p = physicalPlan.getOperators(dag.getMeta(o1));
  Assert.assertEquals("o1 partitions", 1, o1p.size());
  Assert.assertEquals("o1p1 present", o1p1, o1p.get(0));
  Assert.assertEquals("input operator state", PTOperator.State.INACTIVE, o1p1.getState());
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", -1, scm.getCommittedWindowId());
  // committedWindowId updated in next cycle
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", 1, scm.getCommittedWindowId());
  scm.processEvents();
  Assert.assertEquals("containers at committedWindowId=1", 4, physicalPlan.getContainers().size());
  // checkpoint window 2
  o1p1mos.checkpointWindowId(2);
  mc1.sendHeartbeat();
  scm.monitorHeartbeat(false);
  Assert.assertEquals("committedWindowId", 1, scm.getCommittedWindowId());
  o2p1mos.currentWindowId(2).checkpointWindowId(2);
  o2p2mos.currentWindowId(2).checkpointWindowId(2);
  o3p1mos.currentWindowId(2).checkpointWindowId(2);
  unifierp1mos.currentWindowId(2).checkpointWindowId(2);
  mc2.sendHeartbeat();
  mc3.sendHeartbeat();
  mc4.sendHeartbeat();
  scm.monitorHeartbeat(false);
  // all operators are shut down and removed once every operator has reached window 2
  Assert.assertEquals(0, o1p1.getContainer().getOperators().size());
  Assert.assertEquals(0, o2p1.getContainer().getOperators().size());
  Assert.assertEquals(0, physicalPlan.getContainers().size());
}
Use of com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent in project apex-core by apache.
From class StreamingContainerManagerTest, method testStaticPartitioning:
@Test
public void testStaticPartitioning() {
  //
  //          ,---> node2----,
  //          |              |
  //  node1---+---> node2----+---> unifier ---> node3
  //          |              |
  //          '---> node2----'
  //
  GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
  PhysicalPlanTest.PartitioningTestOperator node2 = dag.addOperator("node2", PhysicalPlanTest.PartitioningTestOperator.class);
  node2.setPartitionCount(3);
  dag.setOperatorAttribute(node2, OperatorContext.SPIN_MILLIS, 10);
  /* this should not affect anything materially */
  dag.setOutputPortAttribute(node2.outport1, PortContext.QUEUE_CAPACITY, 1111);
  GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
  dag.setInputPortAttribute(node3.inport1, PortContext.QUEUE_CAPACITY, 2222);
  LogicalPlan.StreamMeta n1n2 = dag.addStream("n1n2", node1.outport1, node2.inport1);
  LogicalPlan.StreamMeta n2n3 = dag.addStream("n2n3", node2.outport1, node3.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
  MemoryStorageAgent msa = new MemoryStorageAgent();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, msa);
  StreamingContainerManager dnm = new StreamingContainerManager(dag);
  PhysicalPlan plan = dnm.getPhysicalPlan();
  Assert.assertEquals("number containers", 5, plan.getContainers().size());
  List<StreamingContainerAgent> containerAgents = Lists.newArrayList();
  for (int i = 0; i < plan.getContainers().size(); i++) {
    containerAgents.add(assignContainer(dnm, "container" + (i + 1)));
  }
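  // node1 should be deployed alone in its container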
  PTContainer c = plan.getOperators(dag.getMeta(node1)).get(0).getContainer();
  StreamingContainerAgent sca1 = dnm.getContainerAgent(c.getExternalId());
  List<OperatorDeployInfo> c1 = getDeployInfo(sca1);
  Assert.assertEquals("number operators assigned to container", 1, c1.size());
  Assert.assertTrue(dag.getMeta(node2) + " assigned to " + sca1.container.getExternalId(), containsNodeContext(c1, dag.getMeta(node1)));
  List<PTOperator> o2Partitions = plan.getOperators(dag.getMeta(node2));
  Assert.assertEquals("number partitions", TestStaticPartitioningSerDe.partitions.length, o2Partitions.size());
  for (int i = 0; i < o2Partitions.size(); i++) {
    String containerId = o2Partitions.get(i).getContainer().getExternalId();
    List<OperatorDeployInfo> cc = getDeployInfo(dnm.getContainerAgent(containerId));
    Assert.assertEquals("number operators assigned to container", 1, cc.size());
    Assert.assertTrue(dag.getMeta(node2) + " assigned to " + containerId, containsNodeContext(cc, dag.getMeta(node2)));
    // n1n2 in, mergeStream out
    OperatorDeployInfo ndi = cc.get(0);
    Assert.assertEquals("type " + ndi, OperatorDeployInfo.OperatorType.GENERIC, ndi.type);
    Assert.assertEquals("inputs " + ndi, 1, ndi.inputs.size());
    Assert.assertEquals("outputs " + ndi, 1, ndi.outputs.size());
    InputDeployInfo nidi = ndi.inputs.get(0);
    Assert.assertEquals("stream " + nidi, n1n2.getName(), nidi.declaredStreamId);
    Assert.assertEquals("partition for " + containerId, Sets.newHashSet(node2.partitionKeys[i]), nidi.partitionKeys);
    Assert.assertEquals("number stream codecs for " + nidi, 1, nidi.streamCodecs.size());
  }
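  // the unifier that merges node2's partitions is deployed in the same container as node3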
  List<OperatorDeployInfo> cUnifier = getDeployInfo(dnm.getContainerAgent(plan.getOperators(dag.getMeta(node3)).get(0).getContainer().getExternalId()));
  Assert.assertEquals("number operators " + cUnifier, 2, cUnifier.size());
  OperatorDeployInfo mergeNodeDI = getNodeDeployInfo(cUnifier, dag.getMeta(node2).getMeta(node2.outport1).getUnifierMeta());
  Assert.assertNotNull("unifier for " + node2, mergeNodeDI);
  Assert.assertEquals("type " + mergeNodeDI, OperatorDeployInfo.OperatorType.UNIFIER, mergeNodeDI.type);
  Assert.assertEquals("inputs " + mergeNodeDI, 3, mergeNodeDI.inputs.size());
  List<Integer> sourceNodeIds = Lists.newArrayList();
  for (InputDeployInfo nidi : mergeNodeDI.inputs) {
    Assert.assertEquals("streamName " + nidi, n2n3.getName(), nidi.declaredStreamId);
    String mergePortName = "<merge#" + dag.getMeta(node2).getMeta(node2.outport1).getPortName() + ">";
    Assert.assertEquals("portName " + nidi, mergePortName, nidi.portName);
    Assert.assertNotNull("sourceNodeId " + nidi, nidi.sourceNodeId);
    Assert.assertNotNull("contextAttributes " + nidi, nidi.contextAttributes);
    Assert.assertEquals("contextAttributes ", new Integer(1111), nidi.getValue(PortContext.QUEUE_CAPACITY));
    sourceNodeIds.add(nidi.sourceNodeId);
  }
  for (PTOperator node : dnm.getPhysicalPlan().getOperators(dag.getMeta(node2))) {
    Assert.assertTrue(sourceNodeIds + " contains " + node.getId(), sourceNodeIds.contains(node.getId()));
  }
  Assert.assertEquals("outputs " + mergeNodeDI, 1, mergeNodeDI.outputs.size());
  for (OutputDeployInfo odi : mergeNodeDI.outputs) {
    Assert.assertNotNull("contextAttributes " + odi, odi.contextAttributes);
    Assert.assertEquals("contextAttributes ", new Integer(2222), odi.getValue(PortContext.QUEUE_CAPACITY));
  }
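  // the MemoryStorageAgent should hold the unifier instance checkpointed at the stateless window id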
  try {
    Object operator = msa.load(mergeNodeDI.id, Stateless.WINDOW_ID);
    Assert.assertTrue("" + operator, operator instanceof DefaultUnifier);
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  // node3 container
  c = plan.getOperators(dag.getMeta(node3)).get(0).getContainer();
  List<OperatorDeployInfo> cmerge = getDeployInfo(dnm.getContainerAgent(c.getExternalId()));
  Assert.assertEquals("number operators " + cmerge, 2, cmerge.size());
  OperatorDeployInfo node3DI = getNodeDeployInfo(cmerge, dag.getMeta(node3));
  Assert.assertNotNull(dag.getMeta(node3) + " assigned", node3DI);
  Assert.assertEquals("inputs " + node3DI, 1, node3DI.inputs.size());
  InputDeployInfo node3In = node3DI.inputs.get(0);
  Assert.assertEquals("streamName " + node3In, n2n3.getName(), node3In.declaredStreamId);
  Assert.assertEquals("portName " + node3In, dag.getMeta(node3).getMeta(node3.inport1).getPortName(), node3In.portName);
  Assert.assertNotNull("sourceNodeId " + node3DI, node3In.sourceNodeId);
  Assert.assertEquals("sourcePortName " + node3DI, mergeNodeDI.outputs.get(0).portName, node3In.sourcePortName);
}
Use of com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent in project apex-core by apache.
From class AffinityRulesTest, method testOperatorPartitionsAntiAffinity:
@Test
public void testOperatorPartitionsAntiAffinity() {
  LogicalPlan dag = new LogicalPlan();
  TestGeneratorInputOperator o1 = dag.addOperator("O1", new TestGeneratorInputOperator());
  GenericTestOperator o2 = dag.addOperator("O2", new GenericTestOperator());
  GenericTestOperator o3 = dag.addOperator("O3", new GenericTestOperator());
  dag.addStream("stream1", o1.outport, o2.inport1);
  dag.addStream("stream2", o2.outport1, o3.inport1);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(5));
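  // O2 is partitioned 5 ways; the NODE_LOCAL anti-affinity rule below requires each partition to be placed on a different node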
  AffinityRulesSet ruleSet = new AffinityRulesSet();
  // Valid case:
  List<AffinityRule> rules = new ArrayList<>();
  ruleSet.setAffinityRules(rules);
  AffinityRule rule1 = new AffinityRule(Type.ANTI_AFFINITY, Locality.NODE_LOCAL, false, "O2", "O2");
  rules.add(rule1);
  dag.setAttribute(DAGContext.AFFINITY_RULES_SET, ruleSet);
  dag.validate();
  dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH, testMeta.getAbsolutePath());
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  StreamingContainerManager scm = new StreamingContainerManager(dag);
  for (ContainerStartRequest csr : scm.containerStartRequests) {
    PTContainer container = csr.container;
    if (container.getOperators().get(0).getName().equals("O2")) {
      Assert.assertEquals("Anti-affinity containers set should have 4 containers for other partitions", 4, container.getStrictAntiPrefs().size());
      for (PTContainer c : container.getStrictAntiPrefs()) {
        for (PTOperator operator : c.getOperators()) {
          Assert.assertEquals("Partition for O2 should be in anti prefs", "O2", operator.getName());
        }
      }
    }
  }
  // Check resource handler assigns different hosts for each partition
  ResourceRequestHandler rr = new ResourceRequestHandler();
  int containerMem = 1000;
  Map<String, NodeReport> nodeReports = Maps.newHashMap();
  for (int i = 0; i < 10; i++) {
    String hostName = "host" + i;
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId(hostName, 0), NodeState.RUNNING, "httpAddress", "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
  }
  // set resources
  rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
  Set<String> partitionHostNames = new HashSet<>();
  for (ContainerStartRequest csr : scm.containerStartRequests) {
    String host = rr.getHost(csr, true);
    csr.container.host = host;
    if (csr.container.getOperators().get(0).getName().equals("O2")) {
      Assert.assertNotNull("Host name should not be null", host);
      LOG.info("Partition {} for operator O2 has host = {} ", csr.container.getId(), host);
      Assert.assertTrue("Each Partition should have a different host", !partitionHostNames.contains(host));
      partitionHostNames.add(host);
    }
  }
}