
Example 51 with LogicalPlan

use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by apache.

From the class StramMiniClusterTest, the method testOperatorFailureRecovery:

@Test
public void testOperatorFailureRecovery() throws Exception {
    LogicalPlan dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, testMeta.toURI().toString());
    FailingOperator badOperator = dag.addOperator("badOperator", FailingOperator.class);
    dag.getContextAttributes(badOperator).put(OperatorContext.RECOVERY_ATTEMPTS, 1);
    LOG.info("Initializing Client");
    StramClient client = new StramClient(conf, dag);
    if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
        // JAVA_HOME not set in the yarn mini cluster
        client.javaCmd = "java";
    }
    try {
        client.start();
        client.startApplication();
        client.setClientTimeout(120000);
        boolean result = client.monitorApplication();
        LOG.info("Client run completed. Result=" + result);
        Assert.assertFalse("should fail", result);
        ApplicationReport ar = client.getApplicationReport();
        Assert.assertEquals("should fail", FinalApplicationStatus.FAILED, ar.getFinalApplicationStatus());
    // unable to get the diagnostics message set by the AM here - see YARN-208
    // diagnostics message does not make it here even with Hadoop 2.2 (but works on standalone cluster)
    //Assert.assertTrue("appReport " + ar, ar.getDiagnostics().contains("badOperator"));
    } finally {
        client.stop();
    }
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test)
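
The same DAG-construction idiom also works without a YARN mini cluster: the plan can be run in-process with StramLocalCluster, as Example 55 below does. A minimal sketch, assuming a hypothetical operator class MyOperator that is not part of the test above:

LogicalPlan dag = new LogicalPlan();
// hypothetical operator class, used only for illustration
MyOperator op = dag.addOperator("op", MyOperator.class);
// limit how often the master tries to redeploy the operator after a failure
dag.getContextAttributes(op).put(OperatorContext.RECOVERY_ATTEMPTS, 1);
// checkpoint/storage configuration omitted for brevity
StramLocalCluster lc = new StramLocalCluster(dag);
lc.runAsync();
// ... drive and observe the application ...
lc.shutdown();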

Example 52 with LogicalPlan

use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by apache.

From the class StramMiniClusterTest, the method testAddAttributeToArgs:

@Test
public void testAddAttributeToArgs() throws Exception {
    LogicalPlan dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_NAME, APP_NAME);
    AddAttributeToArgsOperator operator = dag.addOperator("test", AddAttributeToArgsOperator.class);
    dag.getContextAttributes(operator).put(OperatorContext.RECOVERY_ATTEMPTS, 0);
    StramClient client = new StramClient(conf, dag);
    if (StringUtils.isBlank(System.getenv("JAVA_HOME"))) {
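        // JAVA_HOME not set in the yarn mini cluster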
        client.javaCmd = "java";
    }
    try {
        client.start();
        client.startApplication();
        Assert.assertTrue(client.monitorApplication());
    } finally {
        client.stop();
    }
}
Also used : LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test)
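
A DAG built this way can also be checked locally before submission: LogicalPlan.validate() throws javax.validation.ConstraintViolationException for an invalid plan. A minimal sketch, again with a hypothetical MyOperator:

LogicalPlan dag = new LogicalPlan();
dag.setAttribute(LogicalPlan.APPLICATION_NAME, "validation-check");
// hypothetical operator class, used only for illustration
MyOperator op = dag.addOperator("op", MyOperator.class);
dag.validate(); // fails fast here instead of failing later on the cluster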

Example 53 with LogicalPlan

use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by apache.

From the class StramMiniClusterTest, the method createDAG:

private LogicalPlan createDAG(LogicalPlanConfiguration lpc) throws Exception {
    LogicalPlan dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, testMeta.toURI().toString());
    lpc.prepareDAG(dag, null, "testApp");
    dag.validate();
    Assert.assertEquals("", Integer.valueOf(128), dag.getValue(DAG.MASTER_MEMORY_MB));
    Assert.assertEquals("", "-Dlog4j.properties=custom_log4j.properties", dag.getValue(DAG.CONTAINER_JVM_OPTIONS));
    return dag;
}
Also used : LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan)
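
How the LogicalPlanConfiguration passed to createDAG is obtained is not shown in this snippet. A rough sketch, assuming the constructor that takes a Hadoop Configuration carrying the dt.* properties behind the asserted MASTER_MEMORY_MB and CONTAINER_JVM_OPTIONS values:

Configuration conf = new Configuration(false);
// ... populate conf with the application properties (elided) ...
LogicalPlanConfiguration lpc = new LogicalPlanConfiguration(conf);
LogicalPlan dag = createDAG(lpc); // helper shown above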

Example 54 with LogicalPlan

use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by apache.

From the class StreamCodecTest, the method checkMxNStreamCodecs:

private void checkMxNStreamCodecs(GenericTestOperator node1, GenericTestOperator node2, GenericTestOperator node3, StreamingContainerManager dnm) {
    LogicalPlan dag = dnm.getLogicalPlan();
    PhysicalPlan plan = dnm.getPhysicalPlan();
    List<PTContainer> containers = plan.getContainers();
    LogicalPlan.OperatorMeta n1meta = dag.getMeta(node1);
    LogicalPlan.OperatorMeta n2meta = dag.getMeta(node2);
    LogicalPlan.OperatorMeta n3meta = dag.getMeta(node3);
    for (PTContainer container : containers) {
        List<PTOperator> operators = container.getOperators();
        for (PTOperator operator : operators) {
            if (!operator.isUnifier()) {
                if (operator.getOperatorMeta() == n1meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n1meta.getName(), dnm);
                    OperatorDeployInfo.OutputDeployInfo otdi = getOutputDeployInfo(odi, n1meta.getMeta(node1.outport1));
                    String id = n1meta.getName() + " " + otdi.portName;
                    Assert.assertEquals("number stream codecs " + id, otdi.streamCodecs.size(), 2);
                    checkPresentStreamCodec(n2meta, node2.inport1, otdi.streamCodecs, id, plan);
                    checkPresentStreamCodec(n3meta, node3.inport1, otdi.streamCodecs, id, plan);
                } else if (operator.getOperatorMeta() == n2meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n2meta.getName(), dnm);
                    OperatorDeployInfo.InputDeployInfo idi = getInputDeployInfo(odi, n2meta.getMeta(node2.inport1));
                    String id = n2meta.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(n2meta, node2.inport1, idi.streamCodecs, id, plan);
                } else if (operator.getOperatorMeta() == n3meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n3meta.getName(), dnm);
                    OperatorDeployInfo.InputDeployInfo idi = getInputDeployInfo(odi, n3meta.getMeta(node3.inport1));
                    String id = n3meta.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(n3meta, node3.inport1, idi.streamCodecs, id, plan);
                }
            } else {
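                // unifiers are inserted by the physical plan to merge upstream partitions; each one here feeds a single downstream partition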
                OperatorDeployInfo odi = getOperatorDeployInfo(operator, operator.getName(), dnm);
                Assert.assertEquals("unifier outputs " + operator.getName(), 1, operator.getOutputs().size());
                PTOperator.PTOutput out = operator.getOutputs().get(0);
                Assert.assertEquals("unifier sinks " + operator.getName(), 1, out.sinks.size());
                PTOperator.PTInput idInput = out.sinks.get(0);
                LogicalPlan.OperatorMeta idMeta = idInput.target.getOperatorMeta();
                Operator.InputPort<?> idInputPort = null;
                if (idMeta == n2meta) {
                    idInputPort = node2.inport1;
                } else if (idMeta == n3meta) {
                    idInputPort = node3.inport1;
                }
                List<OperatorDeployInfo.InputDeployInfo> idis = odi.inputs;
                for (OperatorDeployInfo.InputDeployInfo idi : idis) {
                    String id = operator.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(idMeta, idInputPort, idi.streamCodecs, id, plan);
                }
            }
        }
    }
}
Also used : GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) Operator(com.datatorrent.api.Operator) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) PhysicalPlan(com.datatorrent.stram.plan.physical.PhysicalPlan) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) PTContainer(com.datatorrent.stram.plan.physical.PTContainer) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan)
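
The DAG shape this helper expects, node1 fanning out to node2 and node3 over a single stream, would be declared by the calling tests along the following lines. A minimal sketch using the same GenericTestOperator ports; the partitioning that makes this an MxN stream is left to the individual tests:

LogicalPlan dag = new LogicalPlan();
GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
// one output port feeding two downstream input ports
dag.addStream("n1Output", node1.outport1, node2.inport1, node3.inport1);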

Example 55 with LogicalPlan

use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by apache.

From the class PartitioningTest, the method testDynamicDefaultPartitioning:

//@Ignore
@Test
@SuppressWarnings("SleepWhileInLoop")
public void testDynamicDefaultPartitioning() throws Exception {
    LogicalPlan dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 5);
    File checkpointDir = new File(TEST_OUTPUT_DIR, "testDynamicDefaultPartitioning");
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(checkpointDir.getPath(), null));
    CollectorOperator.receivedTuples.clear();
    TestInputOperator<Integer> input = dag.addOperator("input", new TestInputOperator<Integer>());
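    // keep the input operator alive after its tuples are emitted so the application keeps running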
    input.blockEndStream = true;
    CollectorOperator collector = dag.addOperator("partitionedCollector", new CollectorOperator());
    collector.prefix = "" + System.identityHashCode(collector);
    dag.setOperatorAttribute(collector, OperatorContext.PARTITIONER, new StatelessPartitioner<CollectorOperator>(2));
    dag.setOperatorAttribute(collector, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitionLoadWatch() }));
    dag.addStream("fromInput", input.output, collector.input);
    CollectorOperator singleCollector = dag.addOperator("singleCollector", new CollectorOperator());
    singleCollector.prefix = "" + System.identityHashCode(singleCollector);
    dag.addStream("toSingleCollector", collector.output, singleCollector.input);
    StramLocalCluster lc = new StramLocalCluster(dag);
    lc.setHeartbeatMonitoringEnabled(false);
    lc.runAsync();
    List<PTOperator> partitions = assertNumberPartitions(2, lc, dag.getMeta(collector));
    Set<PTContainer> containers = Sets.newHashSet();
    for (PTOperator oper : partitions) {
        containers.add(oper.getContainer());
    }
    Assert.assertTrue("Number of containers are 4", 4 == lc.dnmgr.getPhysicalPlan().getContainers().size());
    PTOperator splitPartition = partitions.get(0);
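    // a load indicator of 1 marks this partition as overloaded; the PartitionLoadWatch stats listener then requests a repartition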
    PartitionLoadWatch.put(splitPartition, 1);
    LOG.debug("Triggered split for {}", splitPartition);
    int count = 0;
    long startMillis = System.currentTimeMillis();
    while (count == 0 && startMillis > System.currentTimeMillis() - StramTestSupport.DEFAULT_TIMEOUT_MILLIS) {
        // yield
        sleep(20);
        count += lc.dnmgr.processEvents();
    }
    partitions = assertNumberPartitions(3, lc, dag.getMeta(collector));
    Assert.assertTrue("container reused", lc.dnmgr.getPhysicalPlan().getContainers().containsAll(containers));
    // check deployment
    for (PTOperator p : partitions) {
        StramTestSupport.waitForActivation(lc, p);
    }
    PartitionLoadWatch.remove(splitPartition);
    for (PTContainer container : lc.dnmgr.getPhysicalPlan().getContainers()) {
        int memory = 0;
        for (PTOperator operator : container.getOperators()) {
            memory += operator.getBufferServerMemory();
            memory += operator.getOperatorMeta().getValue(OperatorContext.MEMORY_MB);
        }
        Assert.assertEquals("memory", memory, container.getRequiredMemoryMB());
    }
    PTOperator planInput = lc.findByLogicalNode(dag.getMeta(input));
    LocalStreamingContainer c = StramTestSupport.waitForActivation(lc, planInput);
    Map<Integer, Node<?>> nodeMap = c.getNodes();
    Assert.assertEquals("number operators " + nodeMap, 1, nodeMap.size());
    @SuppressWarnings({ "unchecked" }) TestInputOperator<Integer> inputDeployed = (TestInputOperator<Integer>) nodeMap.get(planInput.getId()).getOperator();
    Assert.assertNotNull("" + nodeMap, inputDeployed);
    // add tuple that matches the partition key and check that each partition receives it
    ArrayList<Integer> inputTuples = new ArrayList<>();
    LOG.debug("Number of partitions {}", partitions.size());
    for (PTOperator p : partitions) {
        // default partitioning has one port mapping with a single partition key
        LOG.debug("Partition key map size: {}", p.getPartitionKeys().size());
        inputTuples.add(p.getPartitionKeys().values().iterator().next().partitions.iterator().next());
    }
    inputDeployed.testTuples = Collections.synchronizedList(new ArrayList<List<Integer>>());
    inputDeployed.testTuples.add(inputTuples);
    for (PTOperator p : partitions) {
        Integer expectedTuple = p.getPartitionKeys().values().iterator().next().partitions.iterator().next();
        List<Object> receivedTuples;
        int i = 0;
        while ((receivedTuples = CollectorOperator.receivedTuples.get(collector.prefix + p.getId())) == null || receivedTuples.isEmpty()) {
            if (i++ % 100 == 0) {
                LOG.debug("Waiting for tuple: " + p);
            }
            sleep(10);
        }
        Assert.assertEquals("received " + p, Arrays.asList(expectedTuple), receivedTuples);
    }
    // single output operator to receive tuple from each partition
    List<PTOperator> operators = lc.getPlanOperators(dag.getMeta(singleCollector));
    Assert.assertEquals("number output operator instances " + operators, 1, operators.size());
    // ensure redeploy
    StramTestSupport.waitForActivation(lc, operators.get(0));
    List<Object> receivedTuples;
    while ((receivedTuples = CollectorOperator.receivedTuples.get(singleCollector.prefix + operators.get(0).getId())) == null || receivedTuples.size() < inputTuples.size()) {
        LOG.debug("Waiting for tuple: " + operators.get(0) + " expected: " + inputTuples + " received: " + receivedTuples);
        sleep(20);
    }
    Assert.assertEquals("output tuples " + receivedTuples, Sets.newHashSet(inputTuples), Sets.newHashSet(receivedTuples));
    lc.shutdown();
}
Also used : Node(com.datatorrent.stram.engine.Node) ArrayList(java.util.ArrayList) AsyncFSStorageAgent(com.datatorrent.common.util.AsyncFSStorageAgent) PTContainer(com.datatorrent.stram.plan.physical.PTContainer) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) LocalStreamingContainer(com.datatorrent.stram.StramLocalCluster.LocalStreamingContainer) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) File(java.io.File) Test(org.junit.Test)
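
The static half of this setup, two initial partitions of the collector declared through the PARTITIONER attribute, can be written on its own. A minimal sketch reusing the test's operators; growth to three partitions is then driven at runtime by the registered stats listener:

LogicalPlan dag = new LogicalPlan();
TestInputOperator<Integer> input = dag.addOperator("input", new TestInputOperator<Integer>());
CollectorOperator collector = dag.addOperator("collector", new CollectorOperator());
// start with two static partitions of the collector
dag.setOperatorAttribute(collector, OperatorContext.PARTITIONER, new StatelessPartitioner<CollectorOperator>(2));
dag.addStream("inputToCollector", input.output, collector.input);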

Aggregations

LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan) 99
Test (org.junit.Test) 84
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator) 40
TestPlanContext (com.datatorrent.stram.plan.TestPlanContext) 29
PartitioningTest (com.datatorrent.stram.PartitioningTest) 27
File (java.io.File) 23
OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) 22
StramLocalCluster (com.datatorrent.stram.StramLocalCluster) 19
Checkpoint (com.datatorrent.stram.api.Checkpoint) 17
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent) 16
AsyncFSStorageAgent (com.datatorrent.common.util.AsyncFSStorageAgent) 15
StatsListener (com.datatorrent.api.StatsListener) 13
StramTestSupport (com.datatorrent.stram.support.StramTestSupport) 13
Configuration (org.apache.hadoop.conf.Configuration) 13
LogicalPlanConfiguration (com.datatorrent.stram.plan.logical.LogicalPlanConfiguration) 11
NodeReport (org.apache.hadoop.yarn.api.records.NodeReport) 10
ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest) 9
PTOperator (com.datatorrent.stram.plan.physical.PTOperator) 9
ArrayList (java.util.ArrayList) 9
ConstraintViolationException (javax.validation.ConstraintViolationException) 9