
Example 6 with WorkerManager

Use of co.cask.cdap.test.WorkerManager in project cdap by caskdata.

From the class TestFrameworkTestRun, method testConcurrentRuns.

@Test
public void testConcurrentRuns() throws Exception {
    ApplicationManager appManager = deployApplication(ConcurrentRunTestApp.class);
    WorkerManager workerManager = appManager.getWorkerManager(ConcurrentRunTestApp.TestWorker.class.getSimpleName());
    workerManager.start();
    workerManager.waitForStatus(true);
    // Starting the worker a second time should fail, since workers do not support concurrent runs.
    try {
        workerManager.start();
        Assert.fail("Expected failure to start worker");
    } catch (Exception e) {
        Assert.assertTrue(Throwables.getRootCause(e) instanceof ConflictException);
    }
    workerManager.stop();
    // Start the workflow
    File tmpDir = TEMP_FOLDER.newFolder();
    File actionFile = new File(tmpDir, "action.file");
    Map<String, String> args = Collections.singletonMap("action.file", actionFile.getAbsolutePath());
    WorkflowManager workflowManager = appManager.getWorkflowManager(ConcurrentRunTestApp.TestWorkflow.class.getSimpleName());
    // Start two runs; both should succeed.
    workflowManager.start(args);
    workflowManager.start(args);
    // Should get two active runs
    workflowManager.waitForRuns(ProgramRunStatus.RUNNING, 2, 10L, TimeUnit.SECONDS);
    // Touch the file to complete the workflow runs
    Files.touch(actionFile);
    workflowManager.waitForRuns(ProgramRunStatus.COMPLETED, 2, 10L, TimeUnit.SECONDS);
}
Also used : WorkerManager(co.cask.cdap.test.WorkerManager) ApplicationManager(co.cask.cdap.test.ApplicationManager) ConflictException(co.cask.cdap.common.ConflictException) WorkflowManager(co.cask.cdap.test.WorkflowManager) File(java.io.File) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
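The conflict in this test comes from the worker side: a second start() is rejected while a run is active, and the workflow runs only complete once the file named by the "action.file" argument appears. As a rough illustration of that second half, here is a minimal sketch of a file-waiting custom action, assuming the app extends co.cask.cdap.api.customaction.AbstractCustomAction and reads the "action.file" runtime argument shown above; the real ConcurrentRunTestApp may be implemented differently.

import co.cask.cdap.api.customaction.AbstractCustomAction;

import java.io.File;
import java.util.concurrent.TimeUnit;

// Hypothetical shape of the workflow action: it blocks until the file named by the
// "action.file" runtime argument exists, which is why the test can release both
// workflow runs at once by touching a single file.
public class FileWaitAction extends AbstractCustomAction {

    @Override
    public void run() throws Exception {
        File actionFile = new File(getContext().getRuntimeArguments().get("action.file"));
        // Poll until the test calls Files.touch(actionFile).
        while (!actionFile.exists()) {
            TimeUnit.MILLISECONDS.sleep(100);
        }
    }
}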

Example 7 with WorkerManager

Use of co.cask.cdap.test.WorkerManager in project cdap by caskdata.

From the class TestFrameworkTestRun, method testClusterName.

@Test
public void testClusterName() throws Exception {
    String clusterName = getConfiguration().get(Constants.CLUSTER_NAME);
    ApplicationManager appManager = deployApplication(ClusterNameTestApp.class);
    final DataSetManager<KeyValueTable> datasetManager = getDataset(ClusterNameTestApp.CLUSTER_NAME_TABLE);
    final KeyValueTable clusterNameTable = datasetManager.get();
    // A callable that reads the cluster name from the ClusterNameTable.
    // It is used by the Tasks.waitFor calls below.
    final AtomicReference<String> key = new AtomicReference<>();
    Callable<String> readClusterName = new Callable<String>() {

        @Nullable
        @Override
        public String call() throws Exception {
            datasetManager.flush();
            byte[] bytes = clusterNameTable.read(key.get());
            return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
        }
    };
    // Service
    ServiceManager serviceManager = appManager.getServiceManager(ClusterNameTestApp.ClusterNameServiceHandler.class.getSimpleName()).start();
    Assert.assertEquals(clusterName, callServiceGet(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "clusterName"));
    serviceManager.stop();
    // Worker
    WorkerManager workerManager = appManager.getWorkerManager(ClusterNameTestApp.ClusterNameWorker.class.getSimpleName()).start();
    key.set("worker.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // The worker stops by itself, so there is no need to call stop().
    workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);
    // Flow
    FlowManager flowManager = appManager.getFlowManager(ClusterNameTestApp.ClusterNameFlow.class.getSimpleName()).start();
    key.set("flow.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    flowManager.stop();
    // MapReduce
    // Setup the input file used by MR
    Location location = this.<FileSet>getDataset(ClusterNameTestApp.INPUT_FILE_SET).get().getLocation("input");
    try (PrintStream printer = new PrintStream(location.getOutputStream(), true, "UTF-8")) {
        for (int i = 0; i < 10; i++) {
            printer.println("Hello World " + i);
        }
    }
    // Setup input and output dataset arguments
    Map<String, String> inputArgs = new HashMap<>();
    FileSetArguments.setInputPath(inputArgs, "input");
    Map<String, String> outputArgs = new HashMap<>();
    FileSetArguments.setOutputPath(outputArgs, "output");
    Map<String, String> args = new HashMap<>();
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.INPUT_FILE_SET, inputArgs));
    args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.OUTPUT_FILE_SET, outputArgs));
    MapReduceManager mrManager = appManager.getMapReduceManager(ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName()).start(args);
    key.set("mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set("reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    mrManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Spark
    SparkManager sparkManager = appManager.getSparkManager(ClusterNameTestApp.ClusterNameSpark.class.getSimpleName()).start();
    key.set("spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
    // Workflow
    // Cleanup the output path for the MR job in the workflow first
    this.<FileSet>getDataset(ClusterNameTestApp.OUTPUT_FILE_SET).get().getLocation("output").delete(true);
    args = RuntimeArguments.addScope(Scope.MAPREDUCE, ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName(), args);
    WorkflowManager workflowManager = appManager.getWorkflowManager(ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName()).start(args);
    String prefix = ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName() + ".";
    key.set(prefix + "mr.client.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "mapper.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "reducer.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "spark.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    key.set(prefix + "action.cluster.name");
    Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
}
Also used : FlowManager(co.cask.cdap.test.FlowManager) PrintStream(java.io.PrintStream) ApplicationManager(co.cask.cdap.test.ApplicationManager) SparkManager(co.cask.cdap.test.SparkManager) FileSet(co.cask.cdap.api.dataset.lib.FileSet) MapReduceManager(co.cask.cdap.test.MapReduceManager) HashMap(java.util.HashMap) WorkflowManager(co.cask.cdap.test.WorkflowManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) Callable(java.util.concurrent.Callable) WorkerManager(co.cask.cdap.test.WorkerManager) KeyValueTable(co.cask.cdap.api.dataset.lib.KeyValueTable) ServiceManager(co.cask.cdap.test.ServiceManager) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)
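The test repeats the same two-line pattern for every program type: point the shared key at a new row, then wait with Tasks.waitFor on readClusterName. A small helper, shown here only as a sketch built from the calls already in the snippet (the helper itself is not part of the test), could make that pattern explicit:

// Sketch of a helper that wraps the repeated key.set(...) / Tasks.waitFor(...) pattern.
private void assertClusterNameWritten(String expectedClusterName, AtomicReference<String> key,
                                      String rowKey, Callable<String> readClusterName) throws Exception {
    key.set(rowKey);
    Tasks.waitFor(expectedClusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
}

With it, each check becomes a single call, for example assertClusterNameWritten(clusterName, key, "worker.cluster.name", readClusterName).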

Example 8 with WorkerManager

Use of co.cask.cdap.test.WorkerManager in project cdap by caskdata.

From the class TestFrameworkTestRun, method testWorkerInstances.

@Category(SlowTests.class)
@Test
public void testWorkerInstances() throws Exception {
    ApplicationManager applicationManager = deployApplication(testSpace, AppUsingGetServiceURL.class);
    WorkerManager workerManager = applicationManager.getWorkerManager(AppUsingGetServiceURL.PINGING_WORKER).start();
    workerManager.waitForStatus(true);
    // Should be 5 instances when first started.
    workerInstancesCheck(workerManager, 5);
    // Test increasing instances.
    workerManager.setInstances(10);
    workerInstancesCheck(workerManager, 10);
    // Test decreasing instances.
    workerManager.setInstances(2);
    workerInstancesCheck(workerManager, 2);
    // Test requesting same number of instances.
    workerManager.setInstances(2);
    workerInstancesCheck(workerManager, 2);
    WorkerManager lifecycleWorkerManager = applicationManager.getWorkerManager(AppUsingGetServiceURL.LIFECYCLE_WORKER);
    lifecycleWorkerManager.setInstances(3);
    lifecycleWorkerManager.start().waitForStatus(true);
    workerInstancesCheck(lifecycleWorkerManager, 3);
    for (int i = 0; i < 3; i++) {
        kvTableKeyCheck(testSpace, AppUsingGetServiceURL.WORKER_INSTANCES_DATASET, Bytes.toBytes(String.format("init.%d", i)));
    }
    // Set 5 instances for the LifecycleWorker
    lifecycleWorkerManager.setInstances(5);
    workerInstancesCheck(lifecycleWorkerManager, 5);
    // Make sure all the keys have been written before stopping the worker
    for (int i = 0; i < 5; i++) {
        kvTableKeyCheck(testSpace, AppUsingGetServiceURL.WORKER_INSTANCES_DATASET, Bytes.toBytes(String.format("init.%d", i)));
    }
    lifecycleWorkerManager.stop();
    lifecycleWorkerManager.waitForStatus(false);
    if (workerManager.isRunning()) {
        workerManager.stop();
    }
    workerManager.waitForStatus(false);
    // Should be same instances after being stopped.
    workerInstancesCheck(lifecycleWorkerManager, 5);
    workerInstancesCheck(workerManager, 2);
    // Assert the LifecycleWorker dataset writes
    // 3 workers should have started with 3 total instances. 2 more should later start with 5 total instances.
    assertWorkerDatasetWrites(Bytes.toBytes("init"), Bytes.stopKeyForPrefix(Bytes.toBytes("init.2")), 3, 3);
    assertWorkerDatasetWrites(Bytes.toBytes("init.3"), Bytes.stopKeyForPrefix(Bytes.toBytes("init")), 2, 5);
    // Test that the worker had 5 instances when stopped, and each knew that there were 5 instances
    byte[] startRow = Bytes.toBytes("stop");
    assertWorkerDatasetWrites(startRow, Bytes.stopKeyForPrefix(startRow), 5, 5);
}
Also used : WorkerManager(co.cask.cdap.test.WorkerManager) ApplicationManager(co.cask.cdap.test.ApplicationManager) Category(org.junit.experimental.categories.Category) Test(org.junit.Test)
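The workerInstancesCheck helper is referenced but not included in the snippet. A plausible implementation, assuming WorkerManager exposes a getInstances() accessor to match the setInstances(int) calls above, would poll until the reported instance count reaches the expected value:

// Assumed implementation of workerInstancesCheck: polls the worker's reported
// instance count until it matches the expected value or the wait times out.
private void workerInstancesCheck(final WorkerManager workerManager, int expectedInstances) throws Exception {
    Tasks.waitFor(expectedInstances, new Callable<Integer>() {
        @Override
        public Integer call() throws Exception {
            return workerManager.getInstances();
        }
    }, 30, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
}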

Example 9 with WorkerManager

Use of co.cask.cdap.test.WorkerManager in project cdap by caskdata.

From the class TestFrameworkTestRun, method testWorkerStop.

@Test
public void testWorkerStop() throws Exception {
    // Test to make sure the worker program's status goes to stopped after the run method finishes
    ApplicationManager manager = deployApplication(NoOpWorkerApp.class);
    WorkerManager workerManager = manager.getWorkerManager("NoOpWorker");
    workerManager.start();
    workerManager.waitForStatus(false, 30, 1);
}
Also used : WorkerManager(co.cask.cdap.test.WorkerManager) ApplicationManager(co.cask.cdap.test.ApplicationManager) Test(org.junit.Test)
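Note that the test never calls stop(): it relies on the worker's run() method returning on its own. A minimal sketch of what NoOpWorker could look like, assuming it extends co.cask.cdap.api.worker.AbstractWorker (the actual NoOpWorkerApp may differ):

import co.cask.cdap.api.worker.AbstractWorker;

// Hypothetical shape of NoOpWorker: run() returns immediately, so the program
// transitions to stopped by itself and waitForStatus(false, 30, 1) succeeds
// without an explicit stop() call.
public class NoOpWorker extends AbstractWorker {

    @Override
    public void run() {
        // Intentionally empty: returning from run() is what ends the worker.
    }
}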

Example 10 with WorkerManager

Use of co.cask.cdap.test.WorkerManager in project cdap by caskdata.

From the class ETLWorkerTest, method testDAG.

@Test
public void testDAG() throws Exception {
    Schema schema = Schema.recordOf("testRecord", Schema.Field.of("x", Schema.of(Schema.Type.INT)));
    StructuredRecord record1 = StructuredRecord.builder(schema).set("x", 1).build();
    StructuredRecord record2 = StructuredRecord.builder(schema).set("x", 2).build();
    StructuredRecord record3 = StructuredRecord.builder(schema).set("x", 3).build();
    List<StructuredRecord> input = ImmutableList.of(record1, record2, record3);
    /*
     *            ----- value filter ------- sink1
     *           |
     * source --------- double --------
     *           |                     |---- sink2
     *            ----- identity ------
     */
    File sink1Out = TMP_FOLDER.newFolder();
    File sink2Out = TMP_FOLDER.newFolder();
    ETLRealtimeConfig etlConfig = ETLRealtimeConfig.builder()
        .addStage(new ETLStage("source", MockSource.getPlugin(input)))
        .addStage(new ETLStage("sink1", MockSink.getPlugin(sink1Out)))
        .addStage(new ETLStage("sink2", MockSink.getPlugin(sink2Out)))
        .addStage(new ETLStage("valueFilter", IntValueFilterTransform.getPlugin("x", 2)))
        .addStage(new ETLStage("double", DoubleTransform.getPlugin()))
        .addStage(new ETLStage("identity", IdentityTransform.getPlugin()))
        .addConnection("source", "valueFilter")
        .addConnection("source", "double")
        .addConnection("source", "identity")
        .addConnection("valueFilter", "sink1")
        .addConnection("double", "sink2")
        .addConnection("identity", "sink2")
        .build();
    ApplicationId appId = NamespaceId.DEFAULT.app("dagTest");
    AppRequest<ETLRealtimeConfig> appRequest = new AppRequest<>(APP_ARTIFACT, etlConfig);
    ApplicationManager appManager = deployApplication(appId, appRequest);
    Assert.assertNotNull(appManager);
    WorkerManager workerManager = appManager.getWorkerManager(ETLWorker.NAME);
    workerManager.start();
    workerManager.waitForStatus(true, 10, 1);
    try {
        List<StructuredRecord> sink1output = MockSink.getRecords(sink1Out, 0, 10, TimeUnit.SECONDS);
        List<StructuredRecord> sink1expected = ImmutableList.of(record1, record3);
        Assert.assertEquals(sink1expected, sink1output);
        List<StructuredRecord> sink2output = MockSink.getRecords(sink2Out, 0, 10, TimeUnit.SECONDS);
        Assert.assertEquals(9, sink2output.size());
    } finally {
        stopWorker(workerManager);
    }
    validateMetric(3, appId, "source.records.out");
    validateMetric(3, appId, "valueFilter.records.in");
    validateMetric(2, appId, "valueFilter.records.out");
    validateMetric(3, appId, "double.records.in");
    validateMetric(6, appId, "double.records.out");
    validateMetric(3, appId, "identity.records.in");
    validateMetric(3, appId, "identity.records.out");
    validateMetric(2, appId, "sink1.records.in");
    validateMetric(9, appId, "sink2.records.in");
}
Also used : WorkerManager(co.cask.cdap.test.WorkerManager) ApplicationManager(co.cask.cdap.test.ApplicationManager) ETLStage(co.cask.cdap.etl.proto.v2.ETLStage) Schema(co.cask.cdap.api.data.schema.Schema) ETLRealtimeConfig(co.cask.cdap.etl.proto.v2.ETLRealtimeConfig) ApplicationId(co.cask.cdap.proto.id.ApplicationId) File(java.io.File) StructuredRecord(co.cask.cdap.api.data.format.StructuredRecord) AppRequest(co.cask.cdap.proto.artifact.AppRequest) Test(org.junit.Test)
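The stopWorker helper used in the finally block is not part of the snippet. A reasonable sketch, built only from the WorkerManager calls already shown above, stops the worker and waits for it to report a non-running status before the metric assertions run:

// Assumed implementation of stopWorker: stop the ETL worker and wait up to
// 10 seconds (polling every second) for it to report that it is no longer running.
private void stopWorker(WorkerManager workerManager) throws Exception {
    workerManager.stop();
    workerManager.waitForStatus(false, 10, 1);
}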

Aggregations

ApplicationManager (co.cask.cdap.test.ApplicationManager): 18
WorkerManager (co.cask.cdap.test.WorkerManager): 18
Test (org.junit.Test): 18
Category (org.junit.experimental.categories.Category): 7
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 5
AppRequest (co.cask.cdap.proto.artifact.AppRequest): 5
ApplicationId (co.cask.cdap.proto.id.ApplicationId): 5
ServiceManager (co.cask.cdap.test.ServiceManager): 5
TimeoutException (java.util.concurrent.TimeoutException): 5
ConflictException (co.cask.cdap.common.ConflictException): 4
ETLRealtimeConfig (co.cask.cdap.etl.proto.v2.ETLRealtimeConfig): 4
ETLStage (co.cask.cdap.etl.proto.v2.ETLStage): 4
WorkflowManager (co.cask.cdap.test.WorkflowManager): 4
File (java.io.File): 4
IOException (java.io.IOException): 4
StructuredRecord (co.cask.cdap.api.data.format.StructuredRecord): 3
Schema (co.cask.cdap.api.data.schema.Schema): 3
SparkManager (co.cask.cdap.test.SparkManager): 3
Table (co.cask.cdap.api.dataset.table.Table): 2
MessagingAdmin (co.cask.cdap.api.messaging.MessagingAdmin): 2