Example 41 with LogicalPlan

Use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by Apache.

The class OiOStreamTest, method validateNegativeOiOiOdiamond.

@Test
public void validateNegativeOiOiOdiamond() {
    logger.info("Checking the logic for sanity checking of OiO");
    LogicalPlan plan = new LogicalPlan();
    ThreadIdValidatingInputOperator inputOperator = plan.addOperator("inputOperator", new ThreadIdValidatingInputOperator());
    ThreadIdValidatingGenericIntermediateOperator intermediateOperator1 = plan.addOperator("intermediateOperator1", new ThreadIdValidatingGenericIntermediateOperator());
    ThreadIdValidatingGenericIntermediateOperator intermediateOperator2 = plan.addOperator("intermediateOperator2", new ThreadIdValidatingGenericIntermediateOperator());
    ThreadIdValidatingGenericOperatorWithTwoInputPorts outputOperator = plan.addOperator("outputOperator", new ThreadIdValidatingGenericOperatorWithTwoInputPorts());
    plan.addStream("OiOin", inputOperator.output, intermediateOperator1.input, intermediateOperator2.input).setLocality(Locality.THREAD_LOCAL);
    plan.addStream("OiOout1", intermediateOperator1.output, outputOperator.input).setLocality(Locality.THREAD_LOCAL);
    plan.addStream("nonOiOout2", intermediateOperator2.output, outputOperator.input2).setLocality(null);
    try {
        plan.validate();
        Assert.fail("OIOIO negative diamond");
    } catch (ConstraintViolationException ex) {
        Assert.assertTrue("OIOIO negative diamond", true);
    } catch (ValidationException ex) {
        Assert.assertTrue("OIOIO negative diamond", true);
    }
}
Also used : ValidationException(javax.validation.ValidationException) ConstraintViolationException(javax.validation.ConstraintViolationException) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test)
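
For contrast, a minimal sketch of the valid counterpart, reusing the same helper operators: when every edge of the diamond is THREAD_LOCAL, the whole diamond can run in one thread and validate() is expected to return without throwing. Stream and operator names here are illustrative.

LogicalPlan plan = new LogicalPlan();
ThreadIdValidatingInputOperator in = plan.addOperator("in", new ThreadIdValidatingInputOperator());
ThreadIdValidatingGenericIntermediateOperator mid1 = plan.addOperator("mid1", new ThreadIdValidatingGenericIntermediateOperator());
ThreadIdValidatingGenericIntermediateOperator mid2 = plan.addOperator("mid2", new ThreadIdValidatingGenericIntermediateOperator());
ThreadIdValidatingGenericOperatorWithTwoInputPorts out = plan.addOperator("out", new ThreadIdValidatingGenericOperatorWithTwoInputPorts());
plan.addStream("split", in.output, mid1.input, mid2.input).setLocality(Locality.THREAD_LOCAL);
plan.addStream("join1", mid1.output, out.input).setLocality(Locality.THREAD_LOCAL);
plan.addStream("join2", mid2.output, out.input2).setLocality(Locality.THREAD_LOCAL);
plan.validate(); // fully thread-local diamond: no exception expected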

Example 42 with LogicalPlan

Use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by Apache.

The class OperatorDiscoveryTest, method testLogicalPlanConfiguration.

@Test
public void testLogicalPlanConfiguration() throws Exception {
    TestOperator<String, Map<String, Number>> bean = new InputTestOperator<>();
    bean.map.put("key1", new Structured());
    bean.stringArray = new String[] { "one", "two", "three" };
    bean.stringList = Lists.newArrayList("four", "five");
    bean.props = new Properties();
    bean.props.setProperty("key1", "value1");
    bean.structuredArray = new Structured[] { new Structured() };
    bean.genericArray = new String[] { "s1" };
    bean.structuredArray[0].name = "s1";
    bean.color = Color.BLUE;
    bean.booleanProp = true;
    bean.realName = "abc";
    ObjectMapper mapper = ObjectMapperFactory.getOperatorValueSerializer();
    String s = mapper.writeValueAsString(bean);
    // LOG.debug(new JSONObject(s).toString(2));
    // the serializer should emit the 'alias' property in place of the 'realName'
    // field, and skip getter-only properties
    Assert.assertFalse("Shouldn't contain field 'realName'!", s.contains("realName"));
    Assert.assertTrue("Should contain property 'alias'!", s.contains("alias"));
    Assert.assertFalse("Shouldn't contain property 'getterOnly'!", s.contains("getterOnly"));
    JSONObject jsonObj = new JSONObject(s);
    // create the json dag representation
    JSONObject jsonPlan = new JSONObject();
    jsonPlan.put("streams", new JSONArray());
    JSONObject jsonOper = new JSONObject();
    jsonOper.put("name", "Test Operator");
    jsonOper.put("class", InputTestOperator.class.getName());
    jsonOper.put("properties", jsonObj);
    jsonPlan.put("operators", new JSONArray(Lists.newArrayList(jsonOper)));
    Configuration conf = new Configuration(false);
    LogicalPlanConfiguration lpc = new LogicalPlanConfiguration(conf);
    // create logical plan from the json
    LogicalPlan lp = lpc.createFromJson(jsonPlan, "jsontest");
    OperatorMeta om = lp.getOperatorMeta("Test Operator");
    Assert.assertTrue(om.getOperator() instanceof InputTestOperator);
    @SuppressWarnings("rawtypes") TestOperator beanBack = (InputTestOperator) om.getOperator();
    // The operator deserialized back from json should be same as original operator
    Assert.assertEquals(bean.map, beanBack.map);
    Assert.assertArrayEquals(bean.stringArray, beanBack.stringArray);
    Assert.assertEquals(bean.stringList, beanBack.stringList);
    Assert.assertEquals(bean.props, beanBack.props);
    Assert.assertArrayEquals(bean.structuredArray, beanBack.structuredArray);
    Assert.assertArrayEquals(bean.genericArray, beanBack.genericArray);
    Assert.assertEquals(bean.color, beanBack.color);
    Assert.assertEquals(bean.booleanProp, beanBack.booleanProp);
    Assert.assertEquals(bean.realName, beanBack.realName);
    Assert.assertEquals(bean.getterOnly, beanBack.getterOnly);
}
Also used : LogicalPlanConfiguration(com.datatorrent.stram.plan.logical.LogicalPlanConfiguration) Configuration(org.apache.hadoop.conf.Configuration) OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) JSONArray(org.codehaus.jettison.json.JSONArray) Properties(java.util.Properties) LogicalPlanConfiguration(com.datatorrent.stram.plan.logical.LogicalPlanConfiguration) JSONObject(org.codehaus.jettison.json.JSONObject) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) HashMap(java.util.HashMap) Map(java.util.Map) AbstractMap(java.util.AbstractMap) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) Test(org.junit.Test)
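
The JSON shape that createFromJson() expects is small enough to restate on its own. A minimal sketch of a one-operator DAG, mirroring the calls above (the jettison put() calls throw JSONException, so this belongs in a method declaring throws Exception):

JSONObject oper = new JSONObject();
oper.put("name", "Test Operator");                    // unique operator name within the DAG
oper.put("class", InputTestOperator.class.getName()); // concrete operator class to instantiate
oper.put("properties", new JSONObject());             // bean properties; may be empty
JSONObject json = new JSONObject();
json.put("operators", new JSONArray(Lists.newArrayList(oper)));
json.put("streams", new JSONArray());                 // no streams in this skeleton
LogicalPlan lp = new LogicalPlanConfiguration(new Configuration(false)).createFromJson(json, "jsontest");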

Example 43 with LogicalPlan

Use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by Apache.

The class PhysicalPlanTest, method testInputOperatorPartitioning.

/**
   * Tests partitioning of an input operator (one with no input port),
   * covering aspects not exercised by the generic operator test:
   * scaling from one partition to multiple with a unifier, where one
   * partition remains unmodified.
   */
@Test
public void testInputOperatorPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    final TestInputOperator<Object> o1 = dag.addOperator("o1", new TestInputOperator<>());
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.addStream("o1.outport1", o1.output, o2.inport1);
    OperatorMeta o1Meta = dag.getMeta(o1);
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    TestPartitioner<TestInputOperator<Object>> partitioner = new TestPartitioner<>();
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, partitioner);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    Assert.assertEquals("number of containers", 2, plan.getContainers().size());
    List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
    Assert.assertEquals("partitions " + o1Partitions, 1, o1Partitions.size());
    PTOperator o1p1 = o1Partitions.get(0);
    // verify load update generates expected events per configuration
    Assert.assertEquals("stats handlers " + o1p1, 1, o1p1.statsListeners.size());
    StatsListener l = o1p1.statsListeners.get(0);
    Assert.assertTrue("stats handlers " + o1p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
    PartitioningTest.PartitionLoadWatch.put(o1p1, 1);
    plan.onStatusUpdate(o1p1);
    Assert.assertEquals("scale up triggered", 1, ctx.events.size());
    // add another partition, keep existing as is
    partitioner.extraPartitions.add(new DefaultPartition<>(o1));
    Runnable r = ctx.events.remove(0);
    r.run();
    partitioner.extraPartitions.clear();
    o1Partitions = plan.getOperators(o1Meta);
    Assert.assertEquals("operators after scale up", 2, o1Partitions.size());
    Assert.assertEquals("first partition unmodified", o1p1, o1Partitions.get(0));
    Assert.assertEquals("single output", 1, o1p1.getOutputs().size());
    Assert.assertEquals("output to unifier", 1, o1p1.getOutputs().get(0).sinks.size());
    Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(o2)));
    Set<PTOperator> expDeploy = Sets.newHashSet(o1Partitions.get(1));
    expDeploy.addAll(plan.getMergeOperators(dag.getMeta(o1)));
    expDeploy.addAll(expUndeploy);
    expDeploy.add(o1p1.getOutputs().get(0).sinks.get(0).target);
    Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
    Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    for (PTOperator p : o1Partitions) {
        Assert.assertEquals("activation window id " + p, Checkpoint.INITIAL_CHECKPOINT, p.recoveryCheckpoint);
        Assert.assertEquals("checkpoints " + p + " " + p.checkpoints, Lists.newArrayList(), p.checkpoints);
        PartitioningTest.PartitionLoadWatch.put(p, -1);
        plan.onStatusUpdate(p);
    }
    ctx.events.remove(0).run();
    Assert.assertEquals("operators after scale down", 1, plan.getOperators(o1Meta).size());
}
Also used : OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) StatsListener(com.datatorrent.api.StatsListener) TestInputOperator(com.datatorrent.stram.PartitioningTest.TestInputOperator) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) PartitioningTest(com.datatorrent.stram.PartitioningTest) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test) PartitioningTest(com.datatorrent.stram.PartitioningTest)
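
The dynamic-scaling handshake at the core of this test is compact enough to isolate. In the snippet below, plan, ctx, and o1p1 refer to the physical plan, test context, and partition from the test above; the load values follow the test's own usage (1 marks a partition overloaded, -1 underloaded):

PartitioningTest.PartitionLoadWatch.put(o1p1, 1); // report the partition as overloaded
plan.onStatusUpdate(o1p1);                        // the stats listener queues a repartition event
ctx.events.remove(0).run();                       // tests apply the event synchronously; the Partitioner supplies the new partitions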

Example 44 with LogicalPlan

Use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by Apache.

The class PartitioningTest, method testDefaultPartitioning.

@Test
public void testDefaultPartitioning() throws Exception {
    LogicalPlan dag = new LogicalPlan();
    File checkpointDir = new File(TEST_OUTPUT_DIR, "testDefaultPartitioning");
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, new AsyncFSStorageAgent(checkpointDir.getPath(), null));
    Integer[][] testData = { { 4, 5 } };
    CollectorOperator.receivedTuples.clear();
    TestInputOperator<Integer> input = dag.addOperator("input", new TestInputOperator<Integer>());
    input.testTuples = new ArrayList<>();
    for (Integer[] tuples : testData) {
        input.testTuples.add(new ArrayList<>(Arrays.asList(tuples)));
    }
    CollectorOperator collector = dag.addOperator("collector", new CollectorOperator());
    collector.prefix = "" + System.identityHashCode(collector);
    dag.getMeta(collector).getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<CollectorOperator>(2));
    dag.addStream("fromInput", input.output, collector.input);
    CollectorOperator merged = dag.addOperator("merged", new CollectorOperator());
    merged.prefix = "" + System.identityHashCode(merged);
    dag.addStream("toMerged", collector.output, merged.input);
    StramLocalCluster lc = new StramLocalCluster(dag);
    lc.setHeartbeatMonitoringEnabled(false);
    // terminates on end of stream
    lc.run();
    List<PTOperator> operators = lc.getPlanOperators(dag.getMeta(collector));
    Assert.assertEquals("number operator instances " + operators, 2, operators.size());
    // one entry for each partition + merged output
    Assert.assertEquals("received tuples " + CollectorOperator.receivedTuples, 3, CollectorOperator.receivedTuples.size());
    //Assert.assertEquals("received tuples " + operators.get(0), Arrays.asList(4), CollectorOperator.receivedTuples.get(collector.prefix + operators.get(0).getId()));
    Assert.assertEquals("received tuples " + operators.get(1), Arrays.asList(5), CollectorOperator.receivedTuples.get(collector.prefix + operators.get(1).getId()));
    PTOperator pmerged = lc.findByLogicalNode(dag.getMeta(merged));
    List<Object> tuples = CollectorOperator.receivedTuples.get(merged.prefix + pmerged.getId());
    Assert.assertNotNull("merged tuples " + pmerged, tuples);
    Assert.assertEquals("merged tuples " + pmerged, Sets.newHashSet(testData[0]), Sets.newHashSet(tuples));
}
Also used : PTOperator(com.datatorrent.stram.plan.physical.PTOperator) AsyncFSStorageAgent(com.datatorrent.common.util.AsyncFSStorageAgent) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) File(java.io.File) Test(org.junit.Test)
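
The partitioning in this test is driven by a single attribute. A minimal sketch, reusing the test's CollectorOperator: requesting a StatelessPartitioner with a count of 2 produces two clones at launch, and a downstream operator with one input port receives the unified stream.

LogicalPlan dag = new LogicalPlan();
CollectorOperator collector = dag.addOperator("collector", new CollectorOperator());
dag.setOperatorAttribute(collector, OperatorContext.PARTITIONER,
    new StatelessPartitioner<CollectorOperator>(2)); // two partitions at initial deployment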

Example 45 with LogicalPlan

Use of com.datatorrent.stram.plan.logical.LogicalPlan in project apex-core by Apache.

The class StramRecoveryTest, method testRestartApp.

private void testRestartApp(StorageAgent agent, String appPath1) throws Exception {
    String appId1 = "app1";
    String appId2 = "app2";
    String appPath2 = testMeta.getPath() + "/" + appId2;
    dag.setAttribute(LogicalPlan.APPLICATION_ID, appId1);
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
    dag.setAttribute(LogicalPlan.APPLICATION_ATTEMPT_ID, 1);
    dag.setAttribute(OperatorContext.STORAGE_AGENT, agent);
    dag.addOperator("o1", StatsListeningOperator.class);
    FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false));
    StreamingContainerManager.getInstance(recoveryHandler, dag, false);
    // test restore initial snapshot + log
    dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath1);
    StreamingContainerManager scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
    PhysicalPlan plan = scm.getPhysicalPlan();
    // original plan
    dag = plan.getLogicalPlan();
    Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
    PTOperator o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
    long[] ids = new FSStorageAgent(appPath1 + "/" + LogicalPlan.SUBDIR_CHECKPOINTS, new Configuration()).getWindowIds(o1p1.getId());
    Assert.assertArrayEquals(new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
    Assert.assertNull(o1p1.getContainer().getExternalId());
    // trigger journal write
    o1p1.getContainer().setExternalId("cid1");
    scm.writeJournal(o1p1.getContainer().getSetContainerState());
    /* simulate application restart from app1 */
    dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath2);
    dag.setAttribute(LogicalPlan.APPLICATION_ID, appId2);
    StramClient sc = new StramClient(new Configuration(), dag);
    try {
        sc.start();
        sc.copyInitialState(new Path(appPath1));
    } finally {
        sc.stop();
    }
    scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
    plan = scm.getPhysicalPlan();
    dag = plan.getLogicalPlan();
    assertEquals("modified appId", appId2, dag.getValue(LogicalPlan.APPLICATION_ID));
    assertEquals("modified appPath", appPath2, dag.getValue(LogicalPlan.APPLICATION_PATH));
    Assert.assertNotNull("operator", dag.getOperatorMeta("o1"));
    o1p1 = plan.getOperators(dag.getOperatorMeta("o1")).get(0);
    assertEquals("journal copied", "cid1", o1p1.getContainer().getExternalId());
    CascadeStorageAgent csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
    Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
    Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
    Assert.assertEquals("parent storage agent is of same type ", csa.getParentStorageAgent().getClass(), agent.getClass());
    /* parent and current points to expected location */
    Assert.assertTrue(((FSStorageAgent) csa.getParentStorageAgent()).path.contains("app1"));
    Assert.assertTrue(((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app2"));
    ids = csa.getWindowIds(o1p1.getId());
    Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
    /* simulate another application restart from app2 */
    String appId3 = "app3";
    String appPath3 = testMeta.getPath() + "/" + appId3;
    dag = new LogicalPlan();
    dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath3);
    dag.setAttribute(LogicalPlan.APPLICATION_ID, appId3);
    sc = new StramClient(new Configuration(), dag);
    try {
        sc.start();
        // copy state from app2.
        sc.copyInitialState(new Path(appPath2));
    } finally {
        sc.stop();
    }
    scm = StreamingContainerManager.getInstance(new FSRecoveryHandler(dag.assertAppPath(), new Configuration(false)), dag, false);
    plan = scm.getPhysicalPlan();
    dag = plan.getLogicalPlan();
    csa = (CascadeStorageAgent) dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
    Assert.assertEquals("storage agent is replaced by cascade", csa.getClass(), CascadeStorageAgent.class);
    Assert.assertEquals("current storage agent is of same type", csa.getCurrentStorageAgent().getClass(), agent.getClass());
    Assert.assertEquals("parent storage agent is of same type ", csa.getParentStorageAgent().getClass(), CascadeStorageAgent.class);
    CascadeStorageAgent parent = (CascadeStorageAgent) csa.getParentStorageAgent();
    Assert.assertEquals("current storage agent is of same type ", parent.getCurrentStorageAgent().getClass(), agent.getClass());
    Assert.assertEquals("parent storage agent is cascade ", parent.getParentStorageAgent().getClass(), agent.getClass());
    /* verify paths */
    Assert.assertTrue(((FSStorageAgent) parent.getParentStorageAgent()).path.contains("app1"));
    Assert.assertTrue(((FSStorageAgent) parent.getCurrentStorageAgent()).path.contains("app2"));
    Assert.assertTrue(((FSStorageAgent) csa.getCurrentStorageAgent()).path.contains("app3"));
    ids = csa.getWindowIds(o1p1.getId());
    Assert.assertArrayEquals("checkpoints copied", new long[] { o1p1.getRecoveryCheckpoint().getWindowId() }, ids);
}
Also used : Path(org.apache.hadoop.fs.Path) PhysicalPlan(com.datatorrent.stram.plan.physical.PhysicalPlan) Configuration(org.apache.hadoop.conf.Configuration) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) CascadeStorageAgent(org.apache.apex.engine.util.CascadeStorageAgent) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) FSStorageAgent(com.datatorrent.common.util.FSStorageAgent) AsyncFSStorageAgent(com.datatorrent.common.util.AsyncFSStorageAgent)
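
The restart idiom this test exercises twice is worth isolating. A sketch using the same appPath1/appPath2 variables as above: point a fresh LogicalPlan at the new application's path, copy the previous run's state through StramClient, and the recovered plan wraps its storage agent in a CascadeStorageAgent that falls back to the parent application's checkpoints.

LogicalPlan dag = new LogicalPlan();
dag.setAttribute(LogicalPlan.APPLICATION_PATH, appPath2); // the new application's path
dag.setAttribute(LogicalPlan.APPLICATION_ID, "app2");
StramClient sc = new StramClient(new Configuration(), dag);
try {
    sc.start();
    sc.copyInitialState(new Path(appPath1)); // pull checkpoints and journal from the previous run
} finally {
    sc.stop();
}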

Aggregations

LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 99 uses
Test (org.junit.Test): 84 uses
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 40 uses
TestPlanContext (com.datatorrent.stram.plan.TestPlanContext): 29 uses
PartitioningTest (com.datatorrent.stram.PartitioningTest): 27 uses
File (java.io.File): 23 uses
OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta): 22 uses
StramLocalCluster (com.datatorrent.stram.StramLocalCluster): 19 uses
Checkpoint (com.datatorrent.stram.api.Checkpoint): 17 uses
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent): 16 uses
AsyncFSStorageAgent (com.datatorrent.common.util.AsyncFSStorageAgent): 15 uses
StatsListener (com.datatorrent.api.StatsListener): 13 uses
StramTestSupport (com.datatorrent.stram.support.StramTestSupport): 13 uses
Configuration (org.apache.hadoop.conf.Configuration): 13 uses
LogicalPlanConfiguration (com.datatorrent.stram.plan.logical.LogicalPlanConfiguration): 11 uses
NodeReport (org.apache.hadoop.yarn.api.records.NodeReport): 10 uses
ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest): 9 uses
PTOperator (com.datatorrent.stram.plan.physical.PTOperator): 9 uses
ArrayList (java.util.ArrayList): 9 uses
ConstraintViolationException (javax.validation.ConstraintViolationException): 9 uses