Example 6 with WorkflowManager

Use of co.cask.cdap.test.WorkflowManager in project cdap by caskdata.

The class TestFrameworkTestRun, method testDeployWorkflowApp.

@Category(XSlowTests.class)
@Test
@Ignore
public void testDeployWorkflowApp() throws Exception {
    // Add test back when CDAP-12350 is resolved
    ApplicationManager applicationManager = deployApplication(testSpace, AppWithSchedule.class);
    final WorkflowManager wfmanager = applicationManager.getWorkflowManager(AppWithSchedule.WORKFLOW_NAME);
    List<ScheduleDetail> schedules = wfmanager.getProgramSchedules();
    Assert.assertEquals(2, schedules.size());
    String scheduleName = schedules.get(1).getName();
    Assert.assertNotNull(scheduleName);
    Assert.assertFalse(scheduleName.isEmpty());
    final int initialRuns = wfmanager.getHistory().size();
    LOG.info("initialRuns = {}", initialRuns);
    wfmanager.getSchedule(scheduleName).resume();
    String status = wfmanager.getSchedule(scheduleName).status(200);
    Assert.assertEquals("SCHEDULED", status);
    // Make sure something ran before suspending
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return wfmanager.getHistory().size() > 0;
        }
    }, 15, TimeUnit.SECONDS);
    wfmanager.getSchedule(scheduleName).suspend();
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SUSPENDED);
    // All runs should be completed
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            for (RunRecord record : wfmanager.getHistory()) {
                if (record.getStatus() != ProgramRunStatus.COMPLETED) {
                    return false;
                }
            }
            return true;
        }
    }, 15, TimeUnit.SECONDS);
    List<RunRecord> history = wfmanager.getHistory();
    int workflowRuns = history.size();
    LOG.info("workflowRuns = {}", workflowRuns);
    Assert.assertTrue(workflowRuns > 0);
    // Sleep for some time and verify there are no more scheduled jobs after the suspend.
    TimeUnit.SECONDS.sleep(5);
    final int workflowRunsAfterSuspend = wfmanager.getHistory().size();
    Assert.assertEquals(workflowRuns, workflowRunsAfterSuspend);
    wfmanager.getSchedule(scheduleName).resume();
    // Check that after resume it goes to "SCHEDULED" state
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SCHEDULED);
    // Make sure new runs happen after resume
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return wfmanager.getHistory().size() > workflowRunsAfterSuspend;
        }
    }, 15, TimeUnit.SECONDS);
    // Check scheduled state
    Assert.assertEquals("SCHEDULED", wfmanager.getSchedule(scheduleName).status(200));
    // Check status of non-existent schedule
    Assert.assertEquals("NOT_FOUND", wfmanager.getSchedule("doesnt exist").status(404));
    // Suspend the schedule
    wfmanager.getSchedule(scheduleName).suspend();
    // Check that after suspend it goes to "SUSPENDED" state
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SUSPENDED);
    // Test workflow token while suspended
    String pid = history.get(0).getPid();
    WorkflowTokenDetail workflowToken = wfmanager.getToken(pid, WorkflowToken.Scope.SYSTEM, null);
    Assert.assertEquals(0, workflowToken.getTokenData().size());
    workflowToken = wfmanager.getToken(pid, null, null);
    Assert.assertEquals(2, workflowToken.getTokenData().size());
    // Wait for all workflow runs to finish execution, in case more than one run happened with an enabled schedule
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            for (RunRecord record : wfmanager.getHistory()) {
                if (record.getStatus() != ProgramRunStatus.COMPLETED) {
                    return false;
                }
            }
            return true;
        }
    }, 15, TimeUnit.SECONDS);
    // Verify workflow token after workflow completion
    WorkflowTokenNodeDetail workflowTokenAtNode = wfmanager.getTokenAtNode(pid, AppWithSchedule.DummyAction.class.getSimpleName(), WorkflowToken.Scope.USER, "finished");
    Assert.assertEquals(true, Boolean.parseBoolean(workflowTokenAtNode.getTokenDataAtNode().get("finished")));
    workflowToken = wfmanager.getToken(pid, null, null);
    Assert.assertEquals(false, Boolean.parseBoolean(workflowToken.getTokenData().get("running").get(0).getValue()));
}
Also used: ApplicationManager (co.cask.cdap.test.ApplicationManager), WorkflowTokenNodeDetail (co.cask.cdap.proto.WorkflowTokenNodeDetail), WorkflowManager (co.cask.cdap.test.WorkflowManager), ConflictException (co.cask.cdap.common.ConflictException), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException), RunRecord (co.cask.cdap.proto.RunRecord), ScheduleDetail (co.cask.cdap.proto.ScheduleDetail), WorkflowTokenDetail (co.cask.cdap.proto.WorkflowTokenDetail), Ignore (org.junit.Ignore), Category (org.junit.experimental.categories.Category), Test (org.junit.Test)
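
The Tasks.waitFor polling idiom above is written three times as an anonymous Callable<Boolean>. As a minimal sketch, assuming the test can target Java 8 (Callable is a functional interface, so the rewrite is behavior-preserving), the same two checks read more compactly as lambdas:

// Equivalent polling with Java 8 lambdas; same wfmanager and timeouts as above.
Tasks.waitFor(true, () -> wfmanager.getHistory().size() > 0, 15, TimeUnit.SECONDS);
Tasks.waitFor(true,
              () -> wfmanager.getHistory().stream()
                  .allMatch(r -> r.getStatus() == ProgramRunStatus.COMPLETED),
              15, TimeUnit.SECONDS);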

Example 7 with WorkflowManager

Use of co.cask.cdap.test.WorkflowManager in project cdap by caskdata.

The class TestFrameworkTestRun, method testAppWithPlugin.

@Test
public void testAppWithPlugin() throws Exception {
    ArtifactId artifactId = NamespaceId.DEFAULT.artifact("app-with-plugin", "1.0.0-SNAPSHOT");
    addAppArtifact(artifactId, AppWithPlugin.class);
    ArtifactId pluginArtifactId = NamespaceId.DEFAULT.artifact("test-plugin", "1.0.0-SNAPSHOT");
    addPluginArtifact(pluginArtifactId, artifactId, ToStringPlugin.class);
    ApplicationId appId = NamespaceId.DEFAULT.app("AppWithPlugin");
    AppRequest createRequest = new AppRequest(new ArtifactSummary(artifactId.getArtifact(), artifactId.getVersion()));
    ApplicationManager appManager = deployApplication(appId, createRequest);
    final WorkerManager workerManager = appManager.getWorkerManager(AppWithPlugin.WORKER);
    workerManager.start();
    workerManager.waitForStatus(false, 5, 1);
    Tasks.waitFor(false, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return workerManager.getHistory(ProgramRunStatus.COMPLETED).isEmpty();
        }
    }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);
    final ServiceManager serviceManager = appManager.getServiceManager(AppWithPlugin.SERVICE);
    serviceManager.start();
    serviceManager.waitForStatus(true, 1, 10);
    URL serviceURL = serviceManager.getServiceURL(5, TimeUnit.SECONDS);
    callServiceGet(serviceURL, "dummy");
    serviceManager.stop();
    serviceManager.waitForStatus(false, 1, 10);
    Tasks.waitFor(false, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return serviceManager.getHistory(ProgramRunStatus.KILLED).isEmpty();
        }
    }, 5, TimeUnit.SECONDS, 10, TimeUnit.MILLISECONDS);
    WorkflowManager workflowManager = appManager.getWorkflowManager(AppWithPlugin.WORKFLOW);
    workflowManager.start();
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
    List<RunRecord> runRecords = workflowManager.getHistory();
    Assert.assertNotEquals(ProgramRunStatus.FAILED, runRecords.get(0).getStatus());
    DataSetManager<KeyValueTable> workflowTableManager = getDataset(AppWithPlugin.WORKFLOW_TABLE);
    String value = Bytes.toString(workflowTableManager.get().read("val"));
    Assert.assertEquals(AppWithPlugin.TEST, value);
    Map<String, String> workflowTags = ImmutableMap.of(
        Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(),
        Constants.Metrics.Tag.APP, "AppWithPlugin",
        Constants.Metrics.Tag.WORKFLOW, AppWithPlugin.WORKFLOW,
        Constants.Metrics.Tag.RUN_ID, runRecords.get(0).getPid());
    getMetricsManager().waitForTotalMetricCount(workflowTags, String.format("user.destroy.%s", AppWithPlugin.WORKFLOW), 1, 60, TimeUnit.SECONDS);
    // Test the Spark plugin. First send some data to the stream for the Spark program to process.
    StreamManager streamManager = getStreamManager(AppWithPlugin.SPARK_STREAM);
    for (int i = 0; i < 5; i++) {
        streamManager.send("Message " + i);
    }
    SparkManager sparkManager = appManager.getSparkManager(AppWithPlugin.SPARK).start();
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);
    // Verify the Spark result.
    DataSetManager<Table> dataSetManager = getDataset(AppWithPlugin.SPARK_TABLE);
    Table table = dataSetManager.get();
    try (Scanner scanner = table.scan(null, null)) {
        for (int i = 0; i < 5; i++) {
            Row row = scanner.next();
            Assert.assertNotNull(row);
            String expected = "Message " + i + " " + AppWithPlugin.TEST;
            Assert.assertEquals(expected, Bytes.toString(row.getRow()));
            Assert.assertEquals(expected, Bytes.toString(row.get(expected)));
        }
        // There shouldn't be any more rows in the table.
        Assert.assertNull(scanner.next());
    }
}
Also used: Scanner (co.cask.cdap.api.dataset.table.Scanner), ApplicationManager (co.cask.cdap.test.ApplicationManager), ArtifactId (co.cask.cdap.proto.id.ArtifactId), WorkflowManager (co.cask.cdap.test.WorkflowManager), URL (java.net.URL), ServiceManager (co.cask.cdap.test.ServiceManager), SparkManager (co.cask.cdap.test.SparkManager), KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable), Table (co.cask.cdap.api.dataset.table.Table), ConflictException (co.cask.cdap.common.ConflictException), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException), AppRequest (co.cask.cdap.proto.artifact.AppRequest), WorkerManager (co.cask.cdap.test.WorkerManager), RunRecord (co.cask.cdap.proto.RunRecord), ArtifactSummary (co.cask.cdap.api.artifact.ArtifactSummary), StreamManager (co.cask.cdap.test.StreamManager), Row (co.cask.cdap.api.dataset.table.Row), ApplicationId (co.cask.cdap.proto.id.ApplicationId), Test (org.junit.Test)
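
The heart of this test is the three-step artifact setup that everything after it depends on: deploy the app artifact, register the plugin artifact with the app artifact as its parent, then create the application from an AppRequest rather than from a class. A condensed sketch of just that flow, using only calls that appear above ("my-app", "my-plugin", and "MyApp" are illustrative names):

// Condensed deployment flow; artifact and app names are placeholders.
ArtifactId appArtifactId = NamespaceId.DEFAULT.artifact("my-app", "1.0.0-SNAPSHOT");
addAppArtifact(appArtifactId, AppWithPlugin.class);
// The plugin artifact declares the app artifact as its parent, which is what
// lets the app's programs instantiate ToStringPlugin at runtime.
ArtifactId pluginArtifactId = NamespaceId.DEFAULT.artifact("my-plugin", "1.0.0-SNAPSHOT");
addPluginArtifact(pluginArtifactId, appArtifactId, ToStringPlugin.class);
// Create the application from the artifact instead of a class reference.
AppRequest createRequest = new AppRequest(
    new ArtifactSummary(appArtifactId.getArtifact(), appArtifactId.getVersion()));
ApplicationManager appManager = deployApplication(NamespaceId.DEFAULT.app("MyApp"), createRequest);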

Example 8 with WorkflowManager

Use of co.cask.cdap.test.WorkflowManager in project cdap by caskdata.

The class TestFrameworkTestRun, method testConcurrentRuns.

@Test
public void testConcurrentRuns() throws Exception {
    ApplicationManager appManager = deployApplication(ConcurrentRunTestApp.class);
    WorkerManager workerManager = appManager.getWorkerManager(ConcurrentRunTestApp.TestWorker.class.getSimpleName());
    workerManager.start();
    // Starting it a second time should fail, as the worker doesn't support concurrent runs.
    workerManager.waitForStatus(true);
    try {
        workerManager.start();
        Assert.fail("Expected failure to start worker");
    } catch (Exception e) {
        Assert.assertTrue(Throwables.getRootCause(e) instanceof ConflictException);
    }
    workerManager.stop();
    // Start the workflow
    File tmpDir = TEMP_FOLDER.newFolder();
    File actionFile = new File(tmpDir, "action.file");
    Map<String, String> args = Collections.singletonMap("action.file", actionFile.getAbsolutePath());
    WorkflowManager workflowManager = appManager.getWorkflowManager(ConcurrentRunTestApp.TestWorkflow.class.getSimpleName());
    // Start two runs; both should succeed
    workflowManager.start(args);
    workflowManager.start(args);
    // Should get two active runs
    workflowManager.waitForRuns(ProgramRunStatus.RUNNING, 2, 10L, TimeUnit.SECONDS);
    // Touch the file to complete the workflow runs
    Files.touch(actionFile);
    workflowManager.waitForRuns(ProgramRunStatus.COMPLETED, 2, 10L, TimeUnit.SECONDS);
}
Also used: WorkerManager (co.cask.cdap.test.WorkerManager), ApplicationManager (co.cask.cdap.test.ApplicationManager), ConflictException (co.cask.cdap.common.ConflictException), WorkflowManager (co.cask.cdap.test.WorkflowManager), File (java.io.File), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException), Test (org.junit.Test)
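
Both workflow runs can sit in RUNNING simultaneously because the workflow's custom action blocks until action.file exists; Files.touch(actionFile) then releases both runs to completion. ConcurrentRunTestApp's action body is not shown in this excerpt, so the following is only an illustrative sketch of such a gating action built on CDAP's AbstractCustomAction, not the project's code:

// Hypothetical gating action: block until the file named by the
// "action.file" runtime argument appears. Illustrative only.
public class FileWaitAction extends AbstractCustomAction {
    @Override
    public void run() throws Exception {
        File file = new File(getContext().getRuntimeArguments().get("action.file"));
        while (!file.exists()) {
            TimeUnit.MILLISECONDS.sleep(100);
        }
    }
}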

Example 9 with WorkflowManager

Use of co.cask.cdap.test.WorkflowManager in project cdap by caskdata.

The class TestFrameworkTestRun, method testClusterName.

@Test
public void testClusterName() throws Exception {
    String clusterName = getConfiguration().get(Constants.CLUSTER_NAME);
    ApplicationManager appManager = deployApplication(ClusterNameTestApp.class);
    final DataSetManager<KeyValueTable> datasetManager = getDataset(ClusterNameTestApp.CLUSTER_NAME_TABLE);
    final KeyValueTable clusterNameTable = datasetManager.get();
    // A callable for reading the cluster name from the ClusterNameTable.
    // It is used by the Tasks.waitFor calls below.
    final AtomicReference<String> key = new AtomicReference<>();
    Callable<String> readClusterName = new Callable<String>() {

        @Nullable
        @Override
        public String call() throws Exception {
            datasetManager.flush();
            byte[] bytes = clusterNameTable.read(key.get());
            return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
        }
    };
    // Service
    ServiceManager serviceManager = appManager.getServiceManager(ClusterNameTestApp.ClusterNameServiceHandler.class.getSimpleName()).start();
    Assert.assertEquals(clusterName, callServiceGet(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "clusterName"));
    serviceManager.stop();
    // Worker
    WorkerManager workerManager = appManager.getWorkerManager(ClusterNameTestApp.ClusterNameWorker.class.getSimpleName()).start();
    key.set("worker.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // The worker stops by itself; no need to call stop()
    workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);
    // Flow
    FlowManager flowManager = appManager.getFlowManager(ClusterNameTestApp.ClusterNameFlow.class.getSimpleName()).start();
    key.set("flow.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    flowManager.stop();
    // MapReduce
    // Setup the input file used by MR
    Location location = this.<FileSet>getDataset(ClusterNameTestApp.INPUT_FILE_SET).get().getLocation("input");
    try (PrintStream printer = new PrintStream(location.getOutputStream(), true, "UTF-8")) {
        for (int i = 0; i < 10; i++) {
            printer.println("Hello World " + i);
        }
    }
    // Setup input and output dataset arguments
    Map<String, String> inputArgs = new HashMap<>();
    FileSetArguments.setInputPath(inputArgs, "input");
    Map<String, String> outputArgs = new HashMap<>();
    FileSetArguments.setOutputPath(outputArgs, "output");
    Map<String, String> args = new HashMap<>();
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.INPUT_FILE_SET, inputArgs));
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.OUTPUT_FILE_SET, outputArgs));
    MapReduceManager mrManager = appManager.getMapReduceManager(ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName()).start(args);
    key.set("mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    mrManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Spark
    SparkManager sparkManager = appManager.getSparkManager(ClusterNameTestApp.ClusterNameSpark.class.getSimpleName()).start();
    key.set("spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Workflow
    // Cleanup the output path for the MR job in the workflow first
    this.<FileSet>getDataset(ClusterNameTestApp.OUTPUT_FILE_SET).get().getLocation("output").delete(true);
    args = RuntimeArguments.addScope(Scope.MAPREDUCE, ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName(), args);
    WorkflowManager workflowManager = appManager.getWorkflowManager(ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName()).start(args);
    String prefix = ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName() + ".";
    key.set(prefix + "mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "action.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
}
Also used: FlowManager (co.cask.cdap.test.FlowManager), PrintStream (java.io.PrintStream), ApplicationManager (co.cask.cdap.test.ApplicationManager), SparkManager (co.cask.cdap.test.SparkManager), FileSet (co.cask.cdap.api.dataset.lib.FileSet), MapReduceManager (co.cask.cdap.test.MapReduceManager), HashMap (java.util.HashMap), WorkflowManager (co.cask.cdap.test.WorkflowManager), AtomicReference (java.util.concurrent.atomic.AtomicReference), Callable (java.util.concurrent.Callable), WorkerManager (co.cask.cdap.test.WorkerManager), KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable), ServiceManager (co.cask.cdap.test.ServiceManager), Location (org.apache.twill.filesystem.Location), Test (org.junit.Test)
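
Much of the plumbing in this test is RuntimeArguments.addScope, which namespaces arguments so that each dataset or program sees only its own: the FileSet paths are scoped per dataset, and the workflow run reuses the same MapReduce arguments by scoping the whole map to the program name. A minimal sketch of the scoping calls in isolation ("myInputFileSet" and "MyMapReduce" are illustrative names):

// Scope FileSet path arguments to one dataset, then scope the whole map to
// a MapReduce program so only that node of a workflow receives them.
Map<String, String> inputArgs = new HashMap<>();
FileSetArguments.setInputPath(inputArgs, "input");
Map<String, String> args =
    new HashMap<>(RuntimeArguments.addScope(Scope.DATASET, "myInputFileSet", inputArgs));
args = RuntimeArguments.addScope(Scope.MAPREDUCE, "MyMapReduce", args);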

Example 10 with WorkflowManager

Use of co.cask.cdap.test.WorkflowManager in project cdap by caskdata.

The class DataPipelineTest, method testExternalDatasetTracking.

private void testExternalDatasetTracking(Engine engine, boolean backwardsCompatible) throws Exception {
    String suffix = engine.name() + (backwardsCompatible ? "-bc" : "");
    // Define input/output datasets
    String expectedExternalDatasetInput = "fileInput-" + suffix;
    String expectedExternalDatasetOutput = "fileOutput-" + suffix;
    // Define input/output directories
    File inputDir = TMP_FOLDER.newFolder("input-" + suffix);
    String inputFile = "input-file1.txt";
    File outputDir = TMP_FOLDER.newFolder("output-" + suffix);
    File outputSubDir1 = new File(outputDir, "subdir1");
    File outputSubDir2 = new File(outputDir, "subdir2");
    if (!backwardsCompatible) {
        // Assert that there are no external datasets
        Assert.assertNull(getDataset(NamespaceId.DEFAULT.dataset(expectedExternalDatasetInput)).get());
        Assert.assertNull(getDataset(NamespaceId.DEFAULT.dataset(expectedExternalDatasetOutput)).get());
    }
    ETLBatchConfig.Builder builder = ETLBatchConfig.builder("* * * * *");
    ETLBatchConfig etlConfig = builder
        .setEngine(engine)
        .addStage(new ETLStage("source",
            MockExternalSource.getPlugin(expectedExternalDatasetInput, inputDir.getAbsolutePath())))
        .addStage(new ETLStage("sink1",
            MockExternalSink.getPlugin(backwardsCompatible ? null : expectedExternalDatasetOutput,
                "dir1", outputSubDir1.getAbsolutePath())))
        .addStage(new ETLStage("sink2",
            MockExternalSink.getPlugin(backwardsCompatible ? null : expectedExternalDatasetOutput,
                "dir2", outputSubDir2.getAbsolutePath())))
        .addConnection("source", "sink1")
        .addConnection("source", "sink2")
        .build();
    AppRequest<ETLBatchConfig> appRequest = new AppRequest<>(APP_ARTIFACT, etlConfig);
    ApplicationId appId = NamespaceId.DEFAULT.app("ExternalDatasetApp-" + suffix);
    ApplicationManager appManager = deployApplication(appId, appRequest);
    Schema schema = Schema.recordOf("testRecord", Schema.Field.of("name", Schema.of(Schema.Type.STRING)));
    StructuredRecord recordSamuel = StructuredRecord.builder(schema).set("name", "samuel").build();
    StructuredRecord recordBob = StructuredRecord.builder(schema).set("name", "bob").build();
    StructuredRecord recordJane = StructuredRecord.builder(schema).set("name", "jane").build();
    ImmutableList<StructuredRecord> allInput = ImmutableList.of(recordSamuel, recordBob, recordJane);
    // Create input files
    MockExternalSource.writeInput(new File(inputDir, inputFile).getAbsolutePath(), allInput);
    WorkflowManager workflowManager = appManager.getWorkflowManager(SmartWorkflow.NAME);
    workflowManager.start();
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
    List<RunRecord> history = workflowManager.getHistory();
    // There should be only one completed run
    Assert.assertEquals(1, history.size());
    Assert.assertEquals(ProgramRunStatus.COMPLETED, history.get(0).getStatus());
    // Assert output
    Assert.assertEquals(allInput, MockExternalSink.readOutput(outputSubDir1.getAbsolutePath()));
    Assert.assertEquals(allInput, MockExternalSink.readOutput(outputSubDir2.getAbsolutePath()));
    if (!backwardsCompatible) {
        // Assert that external datasets got created
        Assert.assertNotNull(getDataset(NamespaceId.DEFAULT.dataset(expectedExternalDatasetInput)).get());
        Assert.assertNotNull(getDataset(NamespaceId.DEFAULT.dataset(expectedExternalDatasetOutput)).get());
    }
}
Also used: ApplicationManager (co.cask.cdap.test.ApplicationManager), Schema (co.cask.cdap.api.data.schema.Schema), WorkflowManager (co.cask.cdap.test.WorkflowManager), StructuredRecord (co.cask.cdap.api.data.format.StructuredRecord), AppRequest (co.cask.cdap.proto.artifact.AppRequest), ETLBatchConfig (co.cask.cdap.etl.proto.v2.ETLBatchConfig), RunRecord (co.cask.cdap.proto.RunRecord), ETLStage (co.cask.cdap.etl.proto.v2.ETLStage), ApplicationId (co.cask.cdap.proto.id.ApplicationId), File (java.io.File)
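
The builder chain is where the pipeline topology lives: each addStage registers a named plugin stage and each addConnection adds a DAG edge, which is how one source fans out to two sinks above. A minimal linear sketch with one source and one sink, reusing the test's mocks and APP_ARTIFACT constant ("* * * * *" is the schedule's cron expression; stage and dataset names are illustrative):

// Minimal linear pipeline: one source feeding one sink.
ETLBatchConfig config = ETLBatchConfig.builder("* * * * *")
    .setEngine(engine)
    .addStage(new ETLStage("source",
        MockExternalSource.getPlugin("inputDataset", inputDir.getAbsolutePath())))
    .addStage(new ETLStage("sink",
        MockExternalSink.getPlugin("outputDataset", "dir", outputDir.getAbsolutePath())))
    .addConnection("source", "sink")
    .build();
AppRequest<ETLBatchConfig> appRequest = new AppRequest<>(APP_ARTIFACT, config);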

Aggregations

WorkflowManager (co.cask.cdap.test.WorkflowManager): 59 usages
ApplicationManager (co.cask.cdap.test.ApplicationManager): 57 usages
ApplicationId (co.cask.cdap.proto.id.ApplicationId): 46 usages
ETLBatchConfig (co.cask.cdap.etl.proto.v2.ETLBatchConfig): 44 usages
ETLStage (co.cask.cdap.etl.proto.v2.ETLStage): 44 usages
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 42 usages
AppRequest (co.cask.cdap.proto.artifact.AppRequest): 41 usages
Table (co.cask.cdap.api.dataset.table.Table): 39 usages
Test (org.junit.Test): 35 usages
StructuredRecord (co.cask.cdap.api.data.format.StructuredRecord): 34 usages
Schema (co.cask.cdap.api.data.schema.Schema): 31 usages
HashMap (java.util.HashMap): 12 usages
RunRecord (co.cask.cdap.proto.RunRecord): 9 usages
ArrayList (java.util.ArrayList): 9 usages
ETLPlugin (co.cask.cdap.etl.proto.v2.ETLPlugin): 7 usages
HashSet (java.util.HashSet): 7 usages
ConflictException (co.cask.cdap.common.ConflictException): 6 usages
File (java.io.File): 6 usages
IOException (java.io.IOException): 6 usages
TimeoutException (java.util.concurrent.TimeoutException): 6 usages