Use of io.cdap.cdap.test.WorkerManager in project cdap by caskdata.
From the class TestFrameworkTestRun, method testAppWithPlugin.
@Test
public void testAppWithPlugin() throws Exception {
  ArtifactId artifactId = NamespaceId.DEFAULT.artifact("app-with-plugin", "1.0.0-SNAPSHOT");
  addAppArtifact(artifactId, AppWithPlugin.class);
  ArtifactId pluginArtifactId = NamespaceId.DEFAULT.artifact("test-plugin", "1.0.0-SNAPSHOT");
  addPluginArtifact(pluginArtifactId, artifactId, ToStringPlugin.class);

  ApplicationId appId = NamespaceId.DEFAULT.app("AppWithPlugin");
  AppRequest createRequest = new AppRequest(new ArtifactSummary(artifactId.getArtifact(), artifactId.getVersion()));
  ApplicationManager appManager = deployApplication(appId, createRequest);

  final WorkerManager workerManager = appManager.getWorkerManager(AppWithPlugin.WORKER);
  workerManager.start();
  workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);

  final ServiceManager serviceManager = appManager.getServiceManager(AppWithPlugin.SERVICE);
  serviceManager.start();
  serviceManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  URL serviceURL = serviceManager.getServiceURL(5, TimeUnit.SECONDS);
  callServiceGet(serviceURL, "dummy");
  serviceManager.stop();
  serviceManager.waitForStopped(10, TimeUnit.SECONDS);

  WorkflowManager workflowManager = appManager.getWorkflowManager(AppWithPlugin.WORKFLOW);
  workflowManager.start();
  workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 5, TimeUnit.MINUTES);
  List<RunRecord> runRecords = workflowManager.getHistory();
  Assert.assertNotEquals(ProgramRunStatus.FAILED, runRecords.get(0).getStatus());
  DataSetManager<KeyValueTable> workflowTableManager = getDataset(AppWithPlugin.WORKFLOW_TABLE);
  String value = Bytes.toString(workflowTableManager.get().read("val"));
  Assert.assertEquals(AppWithPlugin.TEST, value);
  Map<String, String> workflowTags = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, NamespaceId.DEFAULT.getNamespace(),
                                                     Constants.Metrics.Tag.APP, "AppWithPlugin",
                                                     Constants.Metrics.Tag.WORKFLOW, AppWithPlugin.WORKFLOW,
                                                     Constants.Metrics.Tag.RUN_ID, runRecords.get(0).getPid());
  getMetricsManager().waitForTotalMetricCount(workflowTags, String.format("user.destroy.%s", AppWithPlugin.WORKFLOW), 1, 60, TimeUnit.SECONDS);

  // Testing Spark plugins. First, write some data to the input FileSet for the Spark program to process.
  DataSetManager<FileSet> fileSetManager = getDataset(AppWithPlugin.SPARK_INPUT);
  FileSet fileSet = fileSetManager.get();
  try (PrintStream out = new PrintStream(fileSet.getLocation("input").append("file.txt").getOutputStream(), true, "UTF-8")) {
    for (int i = 0; i < 5; i++) {
      out.println("Message " + i);
    }
  }
  Map<String, String> sparkArgs = new HashMap<>();
  FileSetArguments.setInputPath(sparkArgs, "input");
  SparkManager sparkManager = appManager.getSparkManager(AppWithPlugin.SPARK).start(sparkArgs);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 2, TimeUnit.MINUTES);

  // Verify the Spark result.
  DataSetManager<Table> dataSetManager = getDataset(AppWithPlugin.SPARK_TABLE);
  Table table = dataSetManager.get();
  try (Scanner scanner = table.scan(null, null)) {
    for (int i = 0; i < 5; i++) {
      Row row = scanner.next();
      Assert.assertNotNull(row);
      String expected = "Message " + i + " " + AppWithPlugin.TEST;
      Assert.assertEquals(expected, Bytes.toString(row.getRow()));
      Assert.assertEquals(expected, Bytes.toString(row.get(expected)));
    }
    // There shouldn't be any more rows in the table.
    Assert.assertNull(scanner.next());
  }
}
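The worker steps above follow the TestBase lifecycle pattern that recurs through all of these examples. A minimal sketch of that pattern, assuming a hypothetical app class MyApp with a worker named "MyWorker" (illustrative names, not part of the cdap code base):

// Minimal WorkerManager lifecycle sketch; MyApp and "MyWorker" are hypothetical.
ApplicationManager appManager = deployApplication(MyApp.class);
WorkerManager worker = appManager.getWorkerManager("MyWorker");
worker.start();
// Block until the framework records a completed run, or fail the test on timeout.
worker.waitForRun(ProgramRunStatus.COMPLETED, 30, TimeUnit.SECONDS);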
Use of io.cdap.cdap.test.WorkerManager in project cdap by caskdata.
From the class TestFrameworkTestRun, method testAppWithServices.
@Category(SlowTests.class)
@Test
public void testAppWithServices() throws Exception {
  ApplicationManager applicationManager = deployApplication(AppWithServices.class);
  LOG.info("Deployed.");
  ServiceManager serviceManager = applicationManager.getServiceManager(AppWithServices.SERVICE_NAME).start();
  serviceManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  LOG.info("Service Started");

  URL serviceURL = serviceManager.getServiceURL(15, TimeUnit.SECONDS);
  Assert.assertNotNull(serviceURL);

  // Call the ping endpoint.
  URL url = new URL(serviceURL, "ping2");
  HttpRequest request = HttpRequest.get(url).build();
  HttpResponse response = executeHttp(request);
  Assert.assertEquals(200, response.getResponseCode());

  // Call the failure endpoint.
  url = new URL(serviceURL, "failure");
  request = HttpRequest.get(url).build();
  response = executeHttp(request);
  Assert.assertEquals(500, response.getResponseCode());
  Assert.assertTrue(response.getResponseBodyAsString().contains("Exception"));

  // Call the verify-ClassLoader endpoint.
  url = new URL(serviceURL, "verifyClassLoader");
  request = HttpRequest.get(url).build();
  response = executeHttp(request);
  Assert.assertEquals(200, response.getResponseCode());

  RuntimeMetrics serviceMetrics = serviceManager.getMetrics();
  serviceMetrics.waitForinput(3, 5, TimeUnit.SECONDS);
  Assert.assertEquals(3, serviceMetrics.getInput());
  Assert.assertEquals(2, serviceMetrics.getProcessed());
  Assert.assertEquals(1, serviceMetrics.getException());

  // In AppWithServices the handler name is the same as the service name: the "ServerService" handler.
  RuntimeMetrics handlerMetrics = getMetricsManager().getServiceHandlerMetrics(NamespaceId.DEFAULT.getNamespace(), AppWithServices.APP_NAME, AppWithServices.SERVICE_NAME, AppWithServices.SERVICE_NAME);
  handlerMetrics.waitForinput(3, 5, TimeUnit.SECONDS);
  Assert.assertEquals(3, handlerMetrics.getInput());
  Assert.assertEquals(2, handlerMetrics.getProcessed());
  Assert.assertEquals(1, handlerMetrics.getException());
  // System-scope metrics could also be verified, by adding a getServiceMetrics method to MetricsManager
  // and disabling the system-scope test in TestMetricsCollectionService.

  LOG.info("DatasetUpdateService Started");
  Map<String, String> args = ImmutableMap.of(AppWithServices.WRITE_VALUE_RUN_KEY, AppWithServices.DATASET_TEST_VALUE,
                                             AppWithServices.WRITE_VALUE_STOP_KEY, AppWithServices.DATASET_TEST_VALUE_STOP);
  ServiceManager datasetWorkerServiceManager = applicationManager.getServiceManager(AppWithServices.DATASET_WORKER_SERVICE_NAME).start(args);
  WorkerManager datasetWorker = applicationManager.getWorkerManager(AppWithServices.DATASET_UPDATE_WORKER).start(args);
  datasetWorker.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  datasetWorkerServiceManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);

  ServiceManager noopManager = applicationManager.getServiceManager("NoOpService").start();
  noopManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);

  // We don't know when the datasetWorker's run() method executed, hence we need to retry the service call
  // until it can get a value from the dataset, which is written by the datasetWorker.
  AtomicInteger called = new AtomicInteger(0);
  AtomicInteger failed = new AtomicInteger(0);
  Tasks.waitFor(AppWithServices.DATASET_TEST_VALUE, new Callable<String>() {
    @Override
    public String call() throws Exception {
      URL url = noopManager.getServiceURL();
      String path = "ping/" + AppWithServices.DATASET_TEST_KEY;
      try {
        called.incrementAndGet();
        return new Gson().fromJson(callServiceGet(url, path), String.class);
      } catch (IOException e) {
        failed.incrementAndGet();
        LOG.debug("Exception when reading from service {}/{}", url, path, e);
      }
      return null;
    }
  }, 30, TimeUnit.SECONDS);

  // Validate the metrics emitted by the service calls.
  handlerMetrics = getMetricsManager().getServiceHandlerMetrics(NamespaceId.DEFAULT.getNamespace(), AppWithServices.APP_NAME, "NoOpService", "NoOpHandler");
  handlerMetrics.waitForinput(called.get(), 5, TimeUnit.SECONDS);
  handlerMetrics.waitForProcessed(1, 5, TimeUnit.SECONDS);
  handlerMetrics.waitForException(failed.get(), 5, TimeUnit.SECONDS);

  // Test that a service can discover another service.
  String path = String.format("discover/%s/%s", AppWithServices.APP_NAME, AppWithServices.DATASET_WORKER_SERVICE_NAME);
  url = new URL(serviceURL, path);
  request = HttpRequest.get(url).build();
  response = executeHttp(request);
  Assert.assertEquals(200, response.getResponseCode());

  datasetWorker.stop();
  datasetWorker.waitForStopped(10, TimeUnit.SECONDS);
  datasetWorkerServiceManager.stop();
  datasetWorkerServiceManager.waitForStopped(10, TimeUnit.SECONDS);
  LOG.info("DatasetUpdateService Stopped");
  serviceManager.stop();
  serviceManager.waitForStopped(10, TimeUnit.SECONDS);
  LOG.info("ServerService Stopped");

  // Since all workers are stopped, we can just hit the service to read the dataset. No retry needed.
  String result = callServiceGet(noopManager.getServiceURL(), "ping/" + AppWithServices.DATASET_TEST_KEY_STOP);
  String decodedResult = new Gson().fromJson(result, String.class);
  Assert.assertEquals(AppWithServices.DATASET_TEST_VALUE_STOP, decodedResult);
  result = callServiceGet(noopManager.getServiceURL(), "ping/" + AppWithServices.DATASET_TEST_KEY_STOP_2);
  decodedResult = new Gson().fromJson(result, String.class);
  Assert.assertEquals(AppWithServices.DATASET_TEST_VALUE_STOP_2, decodedResult);
}
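callServiceGet is a helper defined elsewhere in TestFrameworkTestRun and not shown on this page. A plausible sketch of such a helper, offered as an assumption rather than the actual implementation (it uses java.net.HttpURLConnection and Guava's ByteStreams):

// Hypothetical callServiceGet-style helper: HTTP GET against the service URL,
// returning the body and throwing IOException on a non-200 response so that
// retry loops like the Tasks.waitFor above can catch it and try again.
private String callServiceGet(URL serviceURL, String path) throws IOException {
  HttpURLConnection connection = (HttpURLConnection) new URL(serviceURL, path).openConnection();
  try {
    if (connection.getResponseCode() != 200) {
      throw new IOException("Unexpected response " + connection.getResponseCode() + " for " + path);
    }
    try (InputStream in = connection.getInputStream()) {
      return new String(ByteStreams.toByteArray(in), StandardCharsets.UTF_8);
    }
  } finally {
    connection.disconnect();
  }
}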
Use of io.cdap.cdap.test.WorkerManager in project cdap by caskdata.
From the class TestFrameworkTestRun, method testWorkerThrowingException.
@Test
public void testWorkerThrowingException() throws Exception {
  ApplicationManager appManager = deployApplication(AppWithExceptionThrowingWorker.class);
  final WorkerManager workerManager = appManager.getWorkerManager(AppWithExceptionThrowingWorker.WORKER_NAME);

  // Only one instance of the worker, and it throws an exception.
  // The ProgramRunStatus should go to the FAILED state, except for the run that throws in the destroy() method.
  testExceptionWorker(workerManager, 0, 0);

  // Test a case where the worker completes without an exception.
  // There should be two completed runs in total: one from the run that throws in destroy()
  // inside testExceptionWorker(), and one from the next line.
  workerManager.start();
  workerManager.waitForRuns(ProgramRunStatus.COMPLETED, 2, 5, TimeUnit.SECONDS);

  // A few of the worker instances will throw an exception, while the others complete normally.
  // The ProgramRunStatus should still go to the FAILED state, except for the runs that throw in the destroy() method.
  workerManager.setInstances(9);
  testExceptionWorker(workerManager, 2, 2);

  // Test a case where the worker completes without an exception.
  // There should be four completed runs: two from the runs that throw in destroy() inside
  // testExceptionWorker(), one from the previous normal run, and one from the next line.
  workerManager.start();
  workerManager.waitForRuns(ProgramRunStatus.COMPLETED, 4, 10, TimeUnit.SECONDS);
}
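testExceptionWorker is a private helper of TestFrameworkTestRun that this page does not show. Based on the comments above, here is a rough sketch of what such a helper might look like; the runtime-argument key "throw.in" and the exact run counting are assumptions, not the actual code:

// Hypothetical sketch: run the worker three times, telling it via a runtime
// argument to throw in initialize(), run(), and destroy() respectively.
// Throwing in initialize() or run() should produce a FAILED run, while
// throwing in destroy() should still produce a COMPLETED run.
private void testExceptionWorker(WorkerManager workerManager, int completedBefore, int failedBefore) throws Exception {
  workerManager.start(ImmutableMap.of("throw.in", "initialize"));
  workerManager.waitForRuns(ProgramRunStatus.FAILED, failedBefore + 1, 10, TimeUnit.SECONDS);
  workerManager.start(ImmutableMap.of("throw.in", "run"));
  workerManager.waitForRuns(ProgramRunStatus.FAILED, failedBefore + 2, 10, TimeUnit.SECONDS);
  workerManager.start(ImmutableMap.of("throw.in", "destroy"));
  workerManager.waitForRuns(ProgramRunStatus.COMPLETED, completedBefore + 1, 10, TimeUnit.SECONDS);
}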
Use of io.cdap.cdap.test.WorkerManager in project cdap by caskdata.
From the class TestFrameworkTestRun, method testConcurrentRuns.
@Test
public void testConcurrentRuns() throws Exception {
  ApplicationManager appManager = deployApplication(ConcurrentRunTestApp.class);
  WorkerManager workerManager = appManager.getWorkerManager(ConcurrentRunTestApp.TestWorker.class.getSimpleName());
  workerManager.start();

  // Starting it a second time should fail, since workers don't support concurrent runs.
  workerManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  try {
    workerManager.start();
    Assert.fail("Expected failure to start worker");
  } catch (Exception e) {
    Assert.assertTrue(Throwables.getRootCause(e) instanceof ConflictException);
  }
  workerManager.waitForRun(ProgramRunStatus.RUNNING, 10, TimeUnit.SECONDS);
  workerManager.stop();

  // Start the workflow.
  File tmpDir = TEMP_FOLDER.newFolder();
  File actionFile = new File(tmpDir, "action.file");
  Map<String, String> args = Collections.singletonMap("action.file", actionFile.getAbsolutePath());
  WorkflowManager workflowManager = appManager.getWorkflowManager(ConcurrentRunTestApp.TestWorkflow.class.getSimpleName());

  // Start two runs; both should succeed.
  workflowManager.start(args);
  workflowManager.start(args);

  // There should be two active runs.
  workflowManager.waitForRuns(ProgramRunStatus.RUNNING, 2, 10L, TimeUnit.SECONDS);

  // Touch the file to let the workflow runs complete.
  Files.touch(actionFile);
  workflowManager.waitForRuns(ProgramRunStatus.COMPLETED, 2, 10L, TimeUnit.SECONDS);
}
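The test passes the action file's path as a runtime argument, so TestWorkflow presumably contains a custom action that blocks until that file exists; touching the file is what lets both concurrent runs finish. A minimal sketch of such a file-gated action, as an assumption about ConcurrentRunTestApp rather than its actual code:

// Hypothetical file-gated custom action: poll until the file named by the
// "action.file" runtime argument appears, so the test controls completion.
public static class FileWaitAction extends AbstractCustomAction {
  @Override
  public void run() throws Exception {
    File file = new File(getContext().getRuntimeArguments().get("action.file"));
    while (!file.exists()) {
      TimeUnit.MILLISECONDS.sleep(100);
    }
  }
}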
Use of io.cdap.cdap.test.WorkerManager in project cdap by caskdata.
From the class TestFrameworkTestRun, method testClusterName.
@Test
public void testClusterName() throws Exception {
  String clusterName = getConfiguration().get(Constants.CLUSTER_NAME);
  ApplicationManager appManager = deployApplication(ClusterNameTestApp.class);
  final DataSetManager<KeyValueTable> datasetManager = getDataset(ClusterNameTestApp.CLUSTER_NAME_TABLE);
  final KeyValueTable clusterNameTable = datasetManager.get();

  // A callable for reading the cluster name from the ClusterNameTable.
  // It is used by the Tasks.waitFor calls below.
  final AtomicReference<String> key = new AtomicReference<>();
  Callable<String> readClusterName = new Callable<String>() {
    @Nullable
    @Override
    public String call() throws Exception {
      datasetManager.flush();
      byte[] bytes = clusterNameTable.read(key.get());
      return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
    }
  };

  // Service
  ServiceManager serviceManager = appManager.getServiceManager(ClusterNameTestApp.ClusterNameServiceHandler.class.getSimpleName()).start();
  Assert.assertEquals(clusterName, callServiceGet(serviceManager.getServiceURL(10, TimeUnit.SECONDS), "clusterName"));
  serviceManager.stop();

  // Worker
  WorkerManager workerManager = appManager.getWorkerManager(ClusterNameTestApp.ClusterNameWorker.class.getSimpleName()).start();
  key.set("worker.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  // The worker will stop by itself; no need to call stop.
  workerManager.waitForRun(ProgramRunStatus.COMPLETED, 10, TimeUnit.SECONDS);

  // MapReduce
  // Set up the input file used by the MapReduce job.
  Location location = this.<FileSet>getDataset(ClusterNameTestApp.INPUT_FILE_SET).get().getLocation("input");
  try (PrintStream printer = new PrintStream(location.getOutputStream(), true, "UTF-8")) {
    for (int i = 0; i < 10; i++) {
      printer.println("Hello World " + i);
    }
  }
  // Set up the input and output dataset arguments.
  Map<String, String> inputArgs = new HashMap<>();
  FileSetArguments.setInputPath(inputArgs, "input");
  Map<String, String> outputArgs = new HashMap<>();
  FileSetArguments.setOutputPath(outputArgs, "output");
  Map<String, String> args = new HashMap<>();
  args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.INPUT_FILE_SET, inputArgs));
  args.putAll(RuntimeArguments.addScope(Scope.DATASET, ClusterNameTestApp.OUTPUT_FILE_SET, outputArgs));
  MapReduceManager mrManager = appManager.getMapReduceManager(ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName()).start(args);
  key.set("mr.client.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set("mapper.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set("reducer.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  mrManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);

  // Spark
  SparkManager sparkManager = appManager.getSparkManager(ClusterNameTestApp.ClusterNameSpark.class.getSimpleName()).start();
  key.set("spark.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);

  // Workflow
  // First clean up the output path for the MapReduce job in the workflow.
  this.<FileSet>getDataset(ClusterNameTestApp.OUTPUT_FILE_SET).get().getLocation("output").delete(true);
  args = RuntimeArguments.addScope(Scope.MAPREDUCE, ClusterNameTestApp.ClusterNameMapReduce.class.getSimpleName(), args);
  WorkflowManager workflowManager = appManager.getWorkflowManager(ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName()).start(args);
  String prefix = ClusterNameTestApp.ClusterNameWorkflow.class.getSimpleName() + ".";
  key.set(prefix + "mr.client.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "mapper.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "reducer.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "spark.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  key.set(prefix + "action.cluster.name");
  Tasks.waitFor(clusterName, readClusterName, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 120, TimeUnit.SECONDS);
}