Usage of com.datatorrent.stram.plan.physical.PTOperator in the apache/apex-core project:
class PartitioningTest, method testDynamicDefaultPartitioning.
/**
 * Verifies dynamic repartitioning of an operator with the default (stateless) partitioner:
 * starts with 2 partitions of "partitionedCollector", triggers a split via
 * {@link PartitionLoadWatch}, waits for 3 partitions, then checks container memory
 * accounting, per-partition tuple routing, and the downstream unifier/single collector.
 *
 * @throws Exception on cluster or timing failures
 */
@Test
@SuppressWarnings("SleepWhileInLoop")
public void testDynamicDefaultPartitioning() throws Exception {
  LogicalPlan dag = new LogicalPlan();
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 5);
  File checkpointDir = new File(TEST_OUTPUT_DIR, "testDynamicDefaultPartitioning");
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(checkpointDir.getPath(), null));
  // static sink map is shared across tests; reset before use
  CollectorOperator.receivedTuples.clear();
  TestInputOperator<Integer> input = dag.addOperator("input", new TestInputOperator<Integer>());
  input.blockEndStream = true;
  CollectorOperator collector = dag.addOperator("partitionedCollector", new CollectorOperator());
  // prefix keys receivedTuples entries per collector instance
  collector.prefix = "" + System.identityHashCode(collector);
  dag.setOperatorAttribute(collector, OperatorContext.PARTITIONER, new StatelessPartitioner<CollectorOperator>(2));
  dag.setOperatorAttribute(collector, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitionLoadWatch() }));
  dag.addStream("fromInput", input.output, collector.input);
  CollectorOperator singleCollector = dag.addOperator("singleCollector", new CollectorOperator());
  singleCollector.prefix = "" + System.identityHashCode(singleCollector);
  dag.addStream("toSingleCollector", collector.output, singleCollector.input);

  StramLocalCluster lc = new StramLocalCluster(dag);
  // heartbeat monitoring disabled so the test drives plan events explicitly
  lc.setHeartbeatMonitoringEnabled(false);
  lc.runAsync();

  List<PTOperator> partitions = assertNumberPartitions(2, lc, dag.getMeta(collector));
  Set<PTContainer> containers = Sets.newHashSet();
  for (PTOperator oper : partitions) {
    containers.add(oper.getContainer());
  }
  // input + 2 partitions + single collector (unifier is inline) => 4 containers
  Assert.assertEquals("number of containers", 4, lc.dnmgr.getPhysicalPlan().getContainers().size());

  // request a split of the first partition and pump plan events until processed
  PTOperator splitPartition = partitions.get(0);
  PartitionLoadWatch.put(splitPartition, 1);
  LOG.debug("Triggered split for {}", splitPartition);
  int count = 0;
  long startMillis = System.currentTimeMillis();
  while (count == 0 && startMillis > System.currentTimeMillis() - StramTestSupport.DEFAULT_TIMEOUT_MILLIS) {
    // yield to let the plan change propagate
    sleep(20);
    count += lc.dnmgr.processEvents();
  }

  partitions = assertNumberPartitions(3, lc, dag.getMeta(collector));
  // existing containers must survive the repartition
  Assert.assertTrue("container reused", lc.dnmgr.getPhysicalPlan().getContainers().containsAll(containers));
  // check deployment
  for (PTOperator p : partitions) {
    StramTestSupport.waitForActivation(lc, p);
  }
  PartitionLoadWatch.remove(splitPartition);

  // container memory must equal the sum of operator + buffer server memory
  for (PTContainer container : lc.dnmgr.getPhysicalPlan().getContainers()) {
    int memory = 0;
    for (PTOperator operator : container.getOperators()) {
      memory += operator.getBufferServerMemory();
      memory += operator.getOperatorMeta().getValue(OperatorContext.MEMORY_MB);
    }
    Assert.assertEquals("memory", memory, container.getRequiredMemoryMB());
  }

  PTOperator planInput = lc.findByLogicalNode(dag.getMeta(input));
  LocalStreamingContainer c = StramTestSupport.waitForActivation(lc, planInput);
  Map<Integer, Node<?>> nodeMap = c.getNodes();
  Assert.assertEquals("number operators " + nodeMap, 1, nodeMap.size());
  @SuppressWarnings({ "unchecked" })
  TestInputOperator<Integer> inputDeployed = (TestInputOperator<Integer>) nodeMap.get(planInput.getId()).getOperator();
  Assert.assertNotNull("" + nodeMap, inputDeployed);

  // add tuple that matches the partition key and check that each partition receives it
  ArrayList<Integer> inputTuples = new ArrayList<>();
  LOG.debug("Number of partitions {}", partitions.size());
  for (PTOperator p : partitions) {
    // default partitioning has one port mapping with a single partition key
    LOG.debug("Partition key map size: {}", p.getPartitionKeys().size());
    inputTuples.add(p.getPartitionKeys().values().iterator().next().partitions.iterator().next());
  }
  inputDeployed.testTuples = Collections.synchronizedList(new ArrayList<List<Integer>>());
  inputDeployed.testTuples.add(inputTuples);

  for (PTOperator p : partitions) {
    Integer expectedTuple = p.getPartitionKeys().values().iterator().next().partitions.iterator().next();
    List<Object> receivedTuples;
    int i = 0;
    while ((receivedTuples = CollectorOperator.receivedTuples.get(collector.prefix + p.getId())) == null || receivedTuples.isEmpty()) {
      if (i++ % 100 == 0) {
        LOG.debug("Waiting for tuple: {}", p);
      }
      sleep(10);
    }
    Assert.assertEquals("received " + p, Arrays.asList(expectedTuple), receivedTuples);
  }

  // single output operator to receive tuple from each partition
  List<PTOperator> operators = lc.getPlanOperators(dag.getMeta(singleCollector));
  Assert.assertEquals("number output operator instances " + operators, 1, operators.size());
  // ensure redeploy
  StramTestSupport.waitForActivation(lc, operators.get(0));
  List<Object> receivedTuples;
  while ((receivedTuples = CollectorOperator.receivedTuples.get(singleCollector.prefix + operators.get(0).getId())) == null || receivedTuples.size() < inputTuples.size()) {
    LOG.debug("Waiting for tuple: {} expected: {} received: {}", operators.get(0), inputTuples, receivedTuples);
    sleep(20);
  }
  // order across partitions is not deterministic; compare as sets
  Assert.assertEquals("output tuples " + receivedTuples, Sets.newHashSet(inputTuples), Sets.newHashSet(receivedTuples));
  lc.shutdown();
}
Usage of com.datatorrent.stram.plan.physical.PTOperator in the apache/apex-core project:
class StreamingContainerManagerTest, method testRecoveryUpstreamInline.
/**
 * Verifies that when a downstream operator (o3) is deployed inline with an upstream
 * operator (o1), restarting the container of the other upstream (o2) forces the inline
 * container's operators into PENDING_UNDEPLOY so they can be redeployed consistently.
 *
 * @throws Exception on plan construction failures
 */
@Test
public void testRecoveryUpstreamInline() throws Exception {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  dag.addStream("o1o3", o1.outport1, o3.inport1);
  dag.addStream("o2o3", o2.outport1, o3.inport2);
  // cap at 2 containers so o1 and o3 are forced into the same container
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  Assert.assertEquals(2, plan.getContainers().size());
  PTContainer c1 = plan.getContainers().get(0);
  // o1 and o3 share the first container (inline deployment)
  Assert.assertEquals(Sets.newHashSet(plan.getOperators(dag.getMeta(o1)).get(0), plan.getOperators(dag.getMeta(o3)).get(0)), Sets.newHashSet(c1.getOperators()));
  PTContainer c2 = plan.getContainers().get(1);

  assignContainer(scm, "c1");
  assignContainer(scm, "c2");
  for (PTOperator oper : c1.getOperators()) {
    Assert.assertEquals("state " + oper, PTOperator.State.PENDING_DEPLOY, oper.getState());
  }

  // restarting c2 (hosting o2) must undeploy the inline container's operators too
  scm.scheduleContainerRestart(c2.getExternalId());
  for (PTOperator oper : c1.getOperators()) {
    Assert.assertEquals("state " + oper, PTOperator.State.PENDING_UNDEPLOY, oper.getState());
  }
}
Usage of com.datatorrent.stram.plan.physical.PTOperator in the apache/apex-core project:
class StreamingContainerManagerTest, method testPhysicalPropertyUpdate.
/**
 * Verifies that a property set on a deployed physical operator via
 * {@code setPhysicalOperatorProperty} is reflected when read back through
 * {@code getPhysicalOperatorProperty}.
 *
 * @throws Exception on cluster startup or property retrieval timeout
 */
@Test
public void testPhysicalPropertyUpdate() throws Exception {
  TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("o1.outport", o1.outport, o2.inport1);
  StramLocalCluster lc = new StramLocalCluster(dag);
  lc.runAsync();
  try {
    StreamingContainerManager dnmgr = lc.dnmgr;
    // wait until all physical operators are active before touching properties
    for (PTOperator p : dnmgr.getPhysicalPlan().getAllOperators().values()) {
      StramTestSupport.waitForActivation(lc, p);
    }
    // look up the physical id of o1's single partition once and reuse it
    int operatorId = lc.getPlanOperators(dag.getMeta(o1)).get(0).getId();
    dnmgr.setPhysicalOperatorProperty(operatorId, "maxTuples", "2");
    Future<?> future = dnmgr.getPhysicalOperatorProperty(operatorId, "maxTuples", 10000);
    Object object = future.get(10000, TimeUnit.MILLISECONDS);
    Assert.assertNotNull(object);
    @SuppressWarnings("unchecked")
    Map<String, Object> propertyValue = (Map<String, Object>) object;
    Assert.assertEquals(2, propertyValue.get("maxTuples"));
  } finally {
    // always stop the local cluster, even when an assertion fails
    lc.shutdown();
  }
}
Usage of com.datatorrent.stram.plan.physical.PTOperator in the apache/apex-core project:
class StreamingContainerManagerTest, method testShutdownOperatorTimeout.
/**
 * Verifies the shutdown-timeout path: after shutting down both operators, an operator
 * that exceeds TIMEOUT_WINDOW_COUNT without progressing gets its container added to
 * {@code containerStopRequests}, while the other container is left alone.
 *
 * @throws Exception if interrupted while waiting for the timeout window to elapse
 */
@Test
public void testShutdownOperatorTimeout() throws Exception {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("s1", o1.outport1, o2.inport1);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
  // small window and a one-window timeout so the deadline expires quickly
  dag.setAttribute(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS, 50);
  dag.setAttribute(OperatorContext.TIMEOUT_WINDOW_COUNT, 1);

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  PTOperator upstreamOper = plan.getOperators(dag.getMeta(o1)).get(0);
  PTOperator downstreamOper = plan.getOperators(dag.getMeta(o2)).get(0);
  shutdownOperator(scm, upstreamOper, downstreamOper);

  // immediately after shutdown no container stop should be requested yet
  scm.monitorHeartbeat(false);
  Assert.assertTrue(scm.containerStopRequests.isEmpty());

  // let the timeout window pass, then re-check heartbeats
  Thread.sleep(100);
  scm.monitorHeartbeat(false);
  Assert.assertFalse(scm.containerStopRequests.containsKey(upstreamOper.getContainer().getExternalId()));
  Assert.assertTrue(scm.containerStopRequests.containsKey(downstreamOper.getContainer().getExternalId()));
}
Usage of com.datatorrent.stram.plan.physical.PTOperator in the apache/apex-core project:
class DelayOperatorTest, method testCheckpointUpdate.
/**
 * Verifies recovery-checkpoint computation across a delay-operator loop
 * (B -> C -> Delay -> B): the strongly connected group {B, C, Delay} must agree on
 * the greatest common checkpoint (cp3), while operators outside the loop keep their
 * own checkpoints (A stays at the initial checkpoint, D advances to cp5).
 */
@Test
public void testCheckpointUpdate() {
  LogicalPlan dag = StramTestSupport.createDAG(testMeta);

  TestGeneratorInputOperator opA = dag.addOperator("A", TestGeneratorInputOperator.class);
  GenericTestOperator opB = dag.addOperator("B", GenericTestOperator.class);
  GenericTestOperator opC = dag.addOperator("C", GenericTestOperator.class);
  GenericTestOperator opD = dag.addOperator("D", GenericTestOperator.class);
  DefaultDelayOperator<Object> opDelay = dag.addOperator("opDelay", new DefaultDelayOperator<>());

  dag.addStream("AtoB", opA.outport, opB.inport1);
  dag.addStream("BtoC", opB.outport1, opC.inport1);
  dag.addStream("CtoD", opC.outport1, opD.inport1);
  dag.addStream("CtoDelay", opC.outport2, opDelay.input);
  dag.addStream("DelayToB", opDelay.output, opB.inport2);
  dag.validate();
  dag.setAttribute(com.datatorrent.api.Context.OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());

  StreamingContainerManager scm = new StreamingContainerManager(dag);
  PhysicalPlan plan = scm.getPhysicalPlan();
  // set all operators as active to enable recovery window id update
  for (PTOperator oper : plan.getAllOperators().values()) {
    oper.setState(PTOperator.State.ACTIVE);
  }
  Clock clock = new SystemClock();

  PTOperator opA1 = plan.getOperators(dag.getMeta(opA)).get(0);
  PTOperator opB1 = plan.getOperators(dag.getMeta(opB)).get(0);
  PTOperator opC1 = plan.getOperators(dag.getMeta(opC)).get(0);
  PTOperator opDelay1 = plan.getOperators(dag.getMeta(opDelay)).get(0);
  PTOperator opD1 = plan.getOperators(dag.getMeta(opD)).get(0);

  Checkpoint cp3 = new Checkpoint(3L, 0, 0);
  Checkpoint cp5 = new Checkpoint(5L, 0, 0);
  Checkpoint cp4 = new Checkpoint(4L, 0, 0);
  // cp3 is the only checkpoint common to B, C and the delay operator
  opB1.checkpoints.add(cp3);
  opC1.checkpoints.add(cp3);
  opC1.checkpoints.add(cp4);
  opDelay1.checkpoints.add(cp3);
  opDelay1.checkpoints.add(cp5);
  opD1.checkpoints.add(cp5);

  // construct grouping that would be supplied through LogicalPlan
  Set<OperatorMeta> stronglyConnected = Sets.newHashSet(dag.getMeta(opB), dag.getMeta(opC), dag.getMeta(opDelay));
  Map<OperatorMeta, Set<OperatorMeta>> groups = new HashMap<>();
  for (OperatorMeta om : stronglyConnected) {
    groups.put(om, stronglyConnected);
  }

  UpdateCheckpointsContext ctx = new UpdateCheckpointsContext(clock, false, groups);
  scm.updateRecoveryCheckpoints(opB1, ctx, false);

  Assert.assertEquals("checkpoint " + opA1, Checkpoint.INITIAL_CHECKPOINT, opA1.getRecoveryCheckpoint());
  // was asserting opC1 with opB1's message; opB1 must be checked in its own right
  Assert.assertEquals("checkpoint " + opB1, cp3, opB1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + opC1, cp3, opC1.getRecoveryCheckpoint());
  Assert.assertEquals("checkpoint " + opD1, cp5, opD1.getRecoveryCheckpoint());
}
Aggregations