Example 51 with ApplicationManager

Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.

From class TestFrameworkTestRun, method testFlowRuntimeArguments.

@Test
public void testFlowRuntimeArguments() throws Exception {
    ApplicationManager applicationManager = deployApplication(FilterAppWithNewFlowAPI.class);
    // Start the flow with a runtime argument setting the filter threshold to 10
    Map<String, String> args = Maps.newHashMap();
    args.put("threshold", "10");
    applicationManager.getFlowManager("FilterFlow").start(args);
    // Send two events; only "21" exceeds the threshold
    StreamManager input = getStreamManager("input");
    input.send("2");
    input.send("21");
    // Query the service: exactly one event passed the filter
    ServiceManager serviceManager = applicationManager.getServiceManager("CountService").start();
    serviceManager.waitForStatus(true, 2, 1);
    Assert.assertEquals("1", new Gson().fromJson(callServiceGet(serviceManager.getServiceURL(), "result"), String.class));
}
Also used: ApplicationManager (co.cask.cdap.test.ApplicationManager), StreamManager (co.cask.cdap.test.StreamManager), ServiceManager (co.cask.cdap.test.ServiceManager), Gson (com.google.gson.Gson), Test (org.junit.Test)
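
The callServiceGet helper used in the assertion is defined elsewhere in TestFrameworkTestRun and is not part of this snippet. A minimal sketch of what such a helper might look like, assuming a plain HTTP GET against the service URL (the details here are illustrative, not the actual implementation; standard java.io and java.net imports are assumed):

private String callServiceGet(URL serviceURL, String path) throws IOException {
    // Resolve the method path against the service base URL and issue a GET
    HttpURLConnection connection = (HttpURLConnection) new URL(serviceURL, path).openConnection();
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
        // Read the single-line response body
        return reader.readLine();
    } finally {
        connection.disconnect();
    }
}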

Example 52 with ApplicationManager

Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.

From class TestFrameworkTestRun, method testAppWithPlugin.

@Test
public void testAppWithPlugin() throws Exception {
    ArtifactId artifactId = NamespaceId.DEFAULT.artifact("app-with-plugin", "1.0.0-SNAPSHOT");
    addAppArtifact(artifactId, AppWithPlugin.class);
    ArtifactId pluginArtifactId = NamespaceId.DEFAULT.artifact("test-plugin", "1.0.0-SNAPSHOT");
    addPluginArtifact(pluginArtifactId, artifactId, ToStringPlugin.class);
    ApplicationId appId = NamespaceId.DEFAULT.app("AppWithPlugin");
    AppRequest createRequest = new AppRequest(new ArtifactSummary(artifactId.getArtifact(), artifactId.getVersion()));
    ApplicationManager appManager = deployApplication(appId, createRequest);
    final WorkerManager workerManager = appManager.getWorkerManager(AppWithPlugin.WORKER);
    workerManager.start();
    // The worker performs its work and stops on its own; wait for it to finish
    workerManager.waitForStatus(false, 5, 1);
    // Then wait until a COMPLETED run appears in the worker's history
    Tasks.waitFor(false, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return workerManager.getHistory(ProgramRunStatus.COMPLETED).isEmpty();
        }
    }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);
    final ServiceManager serviceManager = appManager.getServiceManager(AppWithPlugin.SERVICE);
    serviceManager.start();
    serviceManager.waitForStatus(true, 1, 10);
    URL serviceURL = serviceManager.getServiceURL(5, TimeUnit.SECONDS);
    callServiceGet(serviceURL, "dummy");
    serviceManager.stop();
    serviceManager.waitForStatus(false, 1, 10);
    // Wait until a KILLED run appears in the service's history (stop() records the run as KILLED)
    Tasks.waitFor(false, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return serviceManager.getHistory(ProgramRunStatus.KILLED).isEmpty();
        }
    }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);
    WorkflowManager workflowManager = appManager.getWorkflowManager(AppWithPlugin.WORKFLOW);
    workflowManager.start();
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
    List<RunRecord> runRecords = workflowManager.getHistory();
    Assert.assertNotEquals(ProgramRunStatus.FAILED, runRecords.get(0).getStatus());
    DataSetManager<KeyValueTable> workflowTableManager = getDataset(AppWithPlugin.WORKFLOW_TABLE);
    String value = Bytes.toString(workflowTableManager.get().read("val"));
    Assert.assertEquals(AppWithPlugin.TEST, value);
    Map<String, String> workflowTags = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(), Constants.Metrics.Tag.APP, "AppWithPlugin", Constants.Metrics.Tag.WORKFLOW, AppWithPlugin.WORKFLOW, Constants.Metrics.Tag.RUN_ID, runRecords.get(0).getPid());
    getMetricsManager().waitForTotalMetricCount(workflowTags, String.format("user.destroy.%s", AppWithPlugin.WORKFLOW), 1, 60, TimeUnit.SECONDS);
    // Test the Spark plugin. First, send some data to the stream for the Spark program to process
    StreamManager streamManager = getStreamManager(AppWithPlugin.SPARK_STREAM);
    for (int i = 0; i < 5; i++) {
        streamManager.send("Message " + i);
    }
    SparkManager sparkManager = appManager.getSparkManager(AppWithPlugin.SPARK).start();
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);
    // Verify the Spark result.
    DataSetManager<Table> dataSetManager = getDataset(AppWithPlugin.SPARK_TABLE);
    Table table = dataSetManager.get();
    try (Scanner scanner = table.scan(null, null)) {
        for (int i = 0; i < 5; i++) {
            Row row = scanner.next();
            Assert.assertNotNull(row);
            String expected = "Message " + i + " " + AppWithPlugin.TEST;
            Assert.assertEquals(expected, Bytes.toString(row.getRow()));
            Assert.assertEquals(expected, Bytes.toString(row.get(expected)));
        }
        // There shouldn't be any more rows in the table.
        Assert.assertNull(scanner.next());
    }
}
Also used: Scanner (co.cask.cdap.api.dataset.table.Scanner), ApplicationManager (co.cask.cdap.test.ApplicationManager), ArtifactId (co.cask.cdap.proto.id.ArtifactId), WorkflowManager (co.cask.cdap.test.WorkflowManager), URL (java.net.URL), ServiceManager (co.cask.cdap.test.ServiceManager), SparkManager (co.cask.cdap.test.SparkManager), KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable), Table (co.cask.cdap.api.dataset.table.Table), ConflictException (co.cask.cdap.common.ConflictException), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException), AppRequest (co.cask.cdap.proto.artifact.AppRequest), WorkerManager (co.cask.cdap.test.WorkerManager), RunRecord (co.cask.cdap.proto.RunRecord), ArtifactSummary (co.cask.cdap.api.artifact.ArtifactSummary), StreamManager (co.cask.cdap.test.StreamManager), Row (co.cask.cdap.api.dataset.table.Row), ApplicationId (co.cask.cdap.proto.id.ApplicationId), Test (org.junit.Test)
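
The anonymous Callables passed to Tasks.waitFor in this example poll until the run history is non-empty. On a Java 8+ source level the same wait can be written more compactly with a lambda, since Callable is a functional interface (a sketch, assuming the project's compiler settings allow it; behavior is unchanged):

    Tasks.waitFor(false,
                  () -> workerManager.getHistory(ProgramRunStatus.COMPLETED).isEmpty(),
                  5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);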

Example 53 with ApplicationManager

Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.

From class MessagingAppTestRun, method testSparkMessaging.

@Test
public void testSparkMessaging() throws Exception {
    ApplicationManager appManager = deployWithArtifact(NAMESPACE, MessagingApp.class, artifactJar);
    final SparkManager sparkManager = appManager.getSparkManager(MessagingSpark.class.getSimpleName()).start();
    final MessageFetcher fetcher = getMessagingContext().getMessageFetcher();
    final AtomicReference<String> messageId = new AtomicReference<>();
    // Wait for the Spark program to create the topic
    final MessagingAdmin messagingAdmin = getMessagingAdmin(NAMESPACE.getNamespace());
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            try {
                messagingAdmin.getTopicProperties(MessagingApp.TOPIC);
                return true;
            } catch (TopicNotFoundException e) {
                return false;
            }
        }
    }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Expect the "start" and "block" messages in order; this also verifies that a failed transaction publishes nothing
    for (String expected : Arrays.asList("start", "block")) {
        Tasks.waitFor(expected, new Callable<String>() {

            @Override
            public String call() throws Exception {
                try (CloseableIterator<Message> iterator = fetcher.fetch(NAMESPACE.getNamespace(), MessagingApp.TOPIC, 1, messageId.get())) {
                    if (!iterator.hasNext()) {
                        return null;
                    }
                    Message message = iterator.next();
                    messageId.set(message.getId());
                    return message.getPayloadAsString();
                }
            }
        }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    }
    // Publish a control message to unblock the Spark execution
    getMessagingContext().getMessagePublisher().publish(NAMESPACE.getNamespace(), MessagingApp.CONTROL_TOPIC, "go");
    // Expect a result message of "result-15", where 15 is the sum of 1 through 5
    Tasks.waitFor("result-15", new Callable<String>() {

        @Override
        public String call() throws Exception {
            try (CloseableIterator<Message> iterator = fetcher.fetch(NAMESPACE.getNamespace(), MessagingApp.TOPIC, 1, messageId.get())) {
                if (!iterator.hasNext()) {
                    return null;
                }
                Message message = iterator.next();
                messageId.set(message.getId());
                return message.getPayloadAsString();
            }
        }
    }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
}
Also used: ApplicationManager (co.cask.cdap.test.ApplicationManager), MessageFetcher (co.cask.cdap.api.messaging.MessageFetcher), CloseableIterator (co.cask.cdap.api.dataset.lib.CloseableIterator), SparkManager (co.cask.cdap.test.SparkManager), Message (co.cask.cdap.api.messaging.Message), TopicNotFoundException (co.cask.cdap.api.messaging.TopicNotFoundException), AtomicReference (java.util.concurrent.atomic.AtomicReference), TimeoutException (java.util.concurrent.TimeoutException), MessagingAdmin (co.cask.cdap.api.messaging.MessagingAdmin), Test (org.junit.Test)
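
The two polling Callables in this example are identical apart from the expected payload. A hypothetical helper could factor out the duplication (fetchNext is an illustrative name, not part of the CDAP test API):

private Callable<String> fetchNext(final MessageFetcher fetcher, final AtomicReference<String> messageId) {
    return new Callable<String>() {
        @Override
        public String call() throws Exception {
            // Fetch at most one message published after the last seen message id
            try (CloseableIterator<Message> iterator =
                     fetcher.fetch(NAMESPACE.getNamespace(), MessagingApp.TOPIC, 1, messageId.get())) {
                if (!iterator.hasNext()) {
                    return null;
                }
                Message message = iterator.next();
                messageId.set(message.getId());
                return message.getPayloadAsString();
            }
        }
    };
}

Each wait then becomes Tasks.waitFor(expected, fetchNext(fetcher, messageId), 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS).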

Example 54 with ApplicationManager

Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.

From class TestFrameworkTestRun, method testConcurrentRuns.

@Test
public void testConcurrentRuns() throws Exception {
    ApplicationManager appManager = deployApplication(ConcurrentRunTestApp.class);
    WorkerManager workerManager = appManager.getWorkerManager(ConcurrentRunTestApp.TestWorker.class.getSimpleName());
    workerManager.start();
    // Wait for the worker to be running; a second start should then fail,
    // since workers don't support concurrent runs
    workerManager.waitForStatus(true);
    try {
        workerManager.start();
        Assert.fail("Expected failure to start worker");
    } catch (Exception e) {
        Assert.assertTrue(Throwables.getRootCause(e) instanceof ConflictException);
    }
    workerManager.stop();
    // Start the workflow
    File tmpDir = TEMP_FOLDER.newFolder();
    File actionFile = new File(tmpDir, "action.file");
    Map<String, String> args = Collections.singletonMap("action.file", actionFile.getAbsolutePath());
    WorkflowManager workflowManager = appManager.getWorkflowManager(ConcurrentRunTestApp.TestWorkflow.class.getSimpleName());
    // Start two runs; both should succeed
    workflowManager.start(args);
    workflowManager.start(args);
    // Should get two active runs
    workflowManager.waitForRuns(ProgramRunStatus.RUNNING, 2, 10L, TimeUnit.SECONDS);
    // Touch the file to complete the workflow runs
    Files.touch(actionFile);
    workflowManager.waitForRuns(ProgramRunStatus.COMPLETED, 2, 10L, TimeUnit.SECONDS);
}
Also used: WorkerManager (co.cask.cdap.test.WorkerManager), ApplicationManager (co.cask.cdap.test.ApplicationManager), ConflictException (co.cask.cdap.common.ConflictException), WorkflowManager (co.cask.cdap.test.WorkflowManager), File (java.io.File), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException), Test (org.junit.Test)
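
Files.touch(actionFile) completes both runs because the workflow blocks until that file exists. The source of ConcurrentRunTestApp is not shown here; a minimal sketch of a custom action with that behavior, assuming CDAP's AbstractCustomAction API (the class name WaitForFileAction is an assumption):

public class WaitForFileAction extends AbstractCustomAction {
    @Override
    public void run() throws Exception {
        // The test passes the file path as the "action.file" runtime argument
        File actionFile = new File(getContext().getRuntimeArguments().get("action.file"));
        // Poll until the test touches the file, letting the workflow run complete
        while (!actionFile.exists()) {
            TimeUnit.MILLISECONDS.sleep(100);
        }
    }
}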

Example 55 with ApplicationManager

Use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata.

From class TestFrameworkTestRun, method testClusterName.

@Test
public void testClusterName() throws Exception {
    String clusterName = getConfiguration().get(Constants.CLUSTER_NAME);
    ApplicationManager appManager = deployApplication(ClusterNameTestApp.class);
    final DataSetManager<KeyValueTable> datasetManager = getDataset(ClusterNameTestApp.CLUSTER_NAME_TABLE);
    final KeyValueTable clusterNameTable = datasetManager.get();
    // A Callable that reads the cluster name stored under the current key in the cluster-name table.
    // It is used by the Tasks.waitFor calls below.
    final AtomicReference<String> key = new AtomicReference<>();
    Callable<String> readClusterName = new Callable<String>() {

        @Nullable
        @Override
        public String call() throws Exception {
            datasetManager.flush();
            byte[] bytes = clusterNameTable.read(key.get());
            return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
        }
    };
    // Service
    ServiceManager serviceManager = appManager.getServiceManager(ClusterNameTestApp.ClusterNameServiceHandler.class.getSimpleName()).start();
    Assert.assertEquals(clusterName, callServiceGet(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "clusterName"));
    serviceManager.stop();
    // Worker
    WorkerManager workerManager = appManager.getWorkerManager(ClusterNameTestApp.ClusterNameWorker.class.getSimpleName()).start();
    key.set("worker.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // The worker stops by itself; no need to call stop()
    workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);
    // Flow
    FlowManager flowManager = appManager.getFlowManager(ClusterNameTestApp.ClusterNameFlow.class.getSimpleName()).start();
    key.set("flow.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    flowManager.stop();
    // MapReduce
    // Set up the input file used by the MapReduce program
    Location location = this.<FileSet>getDataset(ClusterNameTestApp.INPUT_FILE_SET).get().getLocation("input");
    try (PrintStream printer = new PrintStream(location.getOutputStream(), true, "UTF-8")) {
        for (int i = 0; i < 10; i++) {
            printer.println("Hello World " + i);
        }
    }
    // Set up the input and output dataset arguments
    Map<String, String> inputArgs = new HashMap<>();
    FileSetArguments.setInputPath(inputArgs, "input");
    Map<String, String> outputArgs = new HashMap<>();
    FileSetArguments.setOutputPath(outputArgs, "output");
    Map<String, String> args = new HashMap<>();
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.INPUT_FILE_SET, inputArgs));
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.OUTPUT_FILE_SET, outputArgs));
    MapReduceManager mrManager = appManager.getMapReduceManager(ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName()).start(args);
    key.set("mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    mrManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Spark
    SparkManager sparkManager = appManager.getSparkManager(ClusterNameTestApp.ClusterNameSpark.class.getSimpleName()).start();
    key.set("spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Workflow
    // Clean up the output path for the MR job in the workflow first
    this.<FileSet>getDataset(ClusterNameTestApp.OUTPUT_FILE_SET).get().getLocation("output").delete(true);
    args = RuntimeArguments.addScope(Scope.MAPREDUCE, ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName(), args);
    WorkflowManager workflowManager = appManager.getWorkflowManager(ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName()).start(args);
    String prefix = ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName() + ".";
    key.set(prefix + "mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "action.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
}
Also used: FlowManager (co.cask.cdap.test.FlowManager), PrintStream (java.io.PrintStream), ApplicationManager (co.cask.cdap.test.ApplicationManager), SparkManager (co.cask.cdap.test.SparkManager), FileSet (co.cask.cdap.api.dataset.lib.FileSet), MapReduceManager (co.cask.cdap.test.MapReduceManager), HashMap (java.util.HashMap), WorkflowManager (co.cask.cdap.test.WorkflowManager), AtomicReference (java.util.concurrent.atomic.AtomicReference), Callable (java.util.concurrent.Callable), WorkerManager (co.cask.cdap.test.WorkerManager), KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable), ServiceManager (co.cask.cdap.test.ServiceManager), Location (org.apache.twill.filesystem.Location), Test (org.junit.Test)
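
On a Java 8+ source level, the readClusterName Callable in this example can also be written as a lambda with identical behavior (a sketch, assuming the project's compiler settings allow it):

    Callable<String> readClusterName = () -> {
        datasetManager.flush();
        byte[] bytes = clusterNameTable.read(key.get());
        return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
    };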

Aggregations

ApplicationManager (co.cask.cdap.test.ApplicationManager): 188
Test (org.junit.Test): 155
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 88
ApplicationId (co.cask.cdap.proto.id.ApplicationId): 71
AppRequest (co.cask.cdap.proto.artifact.AppRequest): 61
WorkflowManager (co.cask.cdap.test.WorkflowManager): 59
ETLStage (co.cask.cdap.etl.proto.v2.ETLStage): 58
SparkManager (co.cask.cdap.test.SparkManager): 52
Table (co.cask.cdap.api.dataset.table.Table): 50
ServiceManager (co.cask.cdap.test.ServiceManager): 48
StructuredRecord (co.cask.cdap.api.data.format.StructuredRecord): 47
Schema (co.cask.cdap.api.data.schema.Schema): 47
ETLBatchConfig (co.cask.cdap.etl.proto.v2.ETLBatchConfig): 45
StreamManager (co.cask.cdap.test.StreamManager): 43
URL (java.net.URL): 33
HashSet (java.util.HashSet): 27
ArrayList (java.util.ArrayList): 26
IOException (java.io.IOException): 25
HashMap (java.util.HashMap): 24
Set (java.util.Set): 24