Example use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata:
class TestFrameworkTestRun, method testWorkflowStatus.
@Test
public void testWorkflowStatus() throws Exception {
    ApplicationManager applicationManager = deployApplication(WorkflowStatusTestApp.class);

    // Marker files the workflow and its custom action create to signal lifecycle outcomes.
    File workflowSuccessFile = new File(TMP_FOLDER.newFolder() + "/workflow.success");
    File actionSuccessFile = new File(TMP_FOLDER.newFolder() + "/action.success");
    File workflowKilledFile = new File(TMP_FOLDER.newFolder() + "/workflow.killed");
    File firstSimpleActionFile = new File(TMP_FOLDER.newFolder() + "/first");
    File firstSimpleActionDoneFile = new File(TMP_FOLDER.newFolder() + "/first.done");

    WorkflowManager workflowManager = applicationManager.getWorkflowManager(WorkflowStatusTestApp.WORKFLOW_NAME);

    // Run 1: force a failure inside the workflow; neither success marker may appear.
    workflowManager.start(ImmutableMap.<String, String>builder()
        .put("workflow.success.file", workflowSuccessFile.getAbsolutePath())
        .put("action.success.file", actionSuccessFile.getAbsolutePath())
        .put("throw.exception", "true")
        .build());
    workflowManager.waitForRun(ProgramRunStatus.FAILED, 1, TimeUnit.MINUTES);
    // Since action and workflow failed the files should not exist
    Assert.assertFalse(workflowSuccessFile.exists());
    Assert.assertFalse(actionSuccessFile.exists());

    // Run 2: no forced failure; both the workflow and the action must report success.
    workflowManager.start(ImmutableMap.<String, String>builder()
        .put("workflow.success.file", workflowSuccessFile.getAbsolutePath())
        .put("action.success.file", actionSuccessFile.getAbsolutePath())
        .build());
    workflowManager.waitForRun(ProgramRunStatus.COMPLETED, 1, TimeUnit.MINUTES);
    Assert.assertTrue(workflowSuccessFile.exists());
    Assert.assertTrue(actionSuccessFile.exists());

    // Run 3: stop the workflow mid-flight and verify the KILLED marker gets written.
    workflowManager.start(ImmutableMap.<String, String>builder()
        .put("workflow.killed.file", workflowKilledFile.getAbsolutePath())
        .put("first.file", firstSimpleActionFile.getAbsolutePath())
        .put("first.done.file", firstSimpleActionDoneFile.getAbsolutePath())
        .put("test.killed", "true")
        .build());
    // Wait until the first action signals it is running before issuing the stop.
    verifyFileExists(Lists.newArrayList(firstSimpleActionFile));
    workflowManager.stop();
    workflowManager.waitForRun(ProgramRunStatus.KILLED, 1, TimeUnit.MINUTES);
    Assert.assertTrue(workflowKilledFile.exists());
}
Example use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata:
class TestFrameworkTestRun, method testTransactionHandlerService.
@Test
public void testTransactionHandlerService() throws Exception {
    // Verifies that a write performed inside a long-running service transaction is not
    // visible to readers until the transaction commits, and that the service's destroy()
    // hook persists its marker value to the dataset.
    ApplicationManager applicationManager = deployApplication(testSpace, AppWithServices.class);
    LOG.info("Deployed.");
    ServiceManager serviceManager =
        applicationManager.getServiceManager(AppWithServices.TRANSACTIONS_SERVICE_NAME).start();
    serviceManager.waitForStatus(true);
    LOG.info("Service Started");

    final URL baseUrl = serviceManager.getServiceURL(15, TimeUnit.SECONDS);
    Assert.assertNotNull(baseUrl);

    // Make a request to write in a separate thread and wait for it to return.
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    try {
        Future<Integer> requestFuture = executorService.submit(new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                try {
                    // The trailing 10000 asks the handler to hold its transaction open
                    // for that many milliseconds before committing.
                    URL url = new URL(String.format("%s/write/%s/%s/%d", baseUrl,
                        AppWithServices.DATASET_TEST_KEY, AppWithServices.DATASET_TEST_VALUE, 10000));
                    HttpRequest request = HttpRequest.get(url).build();
                    HttpResponse response = HttpRequests.execute(request);
                    return response.getResponseCode();
                } catch (Exception e) {
                    LOG.error("Request thread got exception.", e);
                    throw Throwables.propagate(e);
                }
            }
        });

        // The dataset should not be written by the time this request is made, since the
        // transaction to write has not been committed yet, so the read returns 204 No Content.
        URL url = new URL(String.format("%s/read/%s", baseUrl, AppWithServices.DATASET_TEST_KEY));
        HttpRequest request = HttpRequest.get(url).build();
        HttpResponse response = HttpRequests.execute(request);
        Assert.assertEquals(204, response.getResponseCode());

        // Wait for the transaction to commit.
        Integer writeStatusCode = requestFuture.get();
        Assert.assertEquals(200, writeStatusCode.intValue());

        // Make the same request again. By now the transaction should've completed.
        request = HttpRequest.get(url).build();
        response = HttpRequests.execute(request);
        Assert.assertEquals(200, response.getResponseCode());
        Assert.assertEquals(AppWithServices.DATASET_TEST_VALUE,
            new Gson().fromJson(response.getResponseBodyAsString(), String.class));
    } finally {
        // Previously the executor was only shut down on the success path, leaking its
        // worker thread whenever an assertion above failed.
        executorService.shutdownNow();
    }

    serviceManager.stop();
    serviceManager.waitForStatus(false);

    // destroy() of the service writes DESTROY_KEY -> VALUE; verify it was persisted.
    DataSetManager<KeyValueTable> dsManager =
        getDataset(testSpace.dataset(AppWithServices.TRANSACTIONS_DATASET_NAME));
    String value = Bytes.toString(dsManager.get().read(AppWithServices.DESTROY_KEY));
    Assert.assertEquals(AppWithServices.VALUE, value);
}
Example use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata:
class TestFrameworkTestRun, method testFlowletInitAndSetInstances.
@Test(timeout = 60000L)
public void testFlowletInitAndSetInstances() throws Exception {
    // Each Generator instance emits one item on initialize; scaling the flowlet up
    // should therefore produce exactly one new processed item per added instance.
    ApplicationManager applicationManager = deployApplication(testSpace, DataSetInitApp.class);
    FlowManager flowManager = applicationManager.getFlowManager("DataSetFlow").start();

    RuntimeMetrics consumerMetrics = flowManager.getFlowletMetrics("Consumer");
    consumerMetrics.waitForProcessed(1, 5, TimeUnit.SECONDS);

    final String generatorFlowlet = "Generator";
    Assert.assertEquals(1, flowManager.getFlowletInstances(generatorFlowlet));

    // Scale up to 3 instances: two new instances initialize -> two new items.
    flowManager.setFlowletInstances(generatorFlowlet, 3);
    Assert.assertEquals(3, flowManager.getFlowletInstances(generatorFlowlet));
    consumerMetrics.waitForProcessed(3, 10, TimeUnit.SECONDS);

    // Scale back down to 1: removing instances must not emit anything new.
    flowManager.setFlowletInstances(generatorFlowlet, 1);
    Assert.assertEquals(1, flowManager.getFlowletInstances(generatorFlowlet));
    TimeUnit.SECONDS.sleep(3);
    Assert.assertEquals(3, consumerMetrics.getProcessed());

    // Scale to 2 again: the single freshly created instance emits one more item.
    flowManager.setFlowletInstances(generatorFlowlet, 2);
    Assert.assertEquals(2, flowManager.getFlowletInstances(generatorFlowlet));
    consumerMetrics.waitForProcessed(4, 10, TimeUnit.SECONDS);

    flowManager.stop();

    // The Consumer persists the generator's name into the "conf" table; verify it.
    DataSetManager<Table> confDatasetManager = getDataset(testSpace.dataset("conf"));
    Table confTable = confDatasetManager.get();
    Assert.assertEquals("generator", confTable.get(new Get("key", "column")).getString("column"));
    confDatasetManager.flush();
}
Example use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata:
class TestFrameworkTestRun, method testDeployWorkflowApp.
@Category(XSlowTests.class)
@Test
@Ignore
public void testDeployWorkflowApp() throws Exception {
    // Add test back when CDAP-12350 is resolved.
    // Exercises schedule suspend/resume: runs occur while SCHEDULED, stop while
    // SUSPENDED, and the workflow token is queryable for a completed run.
    ApplicationManager applicationManager = deployApplication(testSpace, AppWithSchedule.class);
    final WorkflowManager wfmanager = applicationManager.getWorkflowManager(AppWithSchedule.WORKFLOW_NAME);
    List<ScheduleDetail> schedules = wfmanager.getProgramSchedules();
    Assert.assertEquals(2, schedules.size());
    String scheduleName = schedules.get(1).getName();
    Assert.assertNotNull(scheduleName);
    Assert.assertFalse(scheduleName.isEmpty());

    final int initialRuns = wfmanager.getHistory().size();
    LOG.info("initialRuns = {}", initialRuns);
    wfmanager.getSchedule(scheduleName).resume();
    String status = wfmanager.getSchedule(scheduleName).status(200);
    Assert.assertEquals("SCHEDULED", status);

    // Make sure something ran before suspending
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return wfmanager.getHistory().size() > 0;
        }
    }, 15, TimeUnit.SECONDS);
    wfmanager.getSchedule(scheduleName).suspend();
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SUSPENDED);

    // All runs should be completed
    waitForAllRunsCompleted(wfmanager);

    List<RunRecord> history = wfmanager.getHistory();
    int workflowRuns = history.size();
    LOG.info("workflowRuns = {}", workflowRuns);
    Assert.assertTrue(workflowRuns > 0);

    // Sleep for some time and verify there are no more scheduled jobs after the suspend.
    TimeUnit.SECONDS.sleep(5);
    final int workflowRunsAfterSuspend = wfmanager.getHistory().size();
    Assert.assertEquals(workflowRuns, workflowRunsAfterSuspend);

    wfmanager.getSchedule(scheduleName).resume();
    // Check that after resume it goes to "SCHEDULED" state
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SCHEDULED);

    // Make sure new runs happens after resume
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return wfmanager.getHistory().size() > workflowRunsAfterSuspend;
        }
    }, 15, TimeUnit.SECONDS);

    // Check scheduled state
    Assert.assertEquals("SCHEDULED", wfmanager.getSchedule(scheduleName).status(200));
    // Check status of non-existent schedule
    Assert.assertEquals("NOT_FOUND", wfmanager.getSchedule("doesnt exist").status(404));

    // Suspend the schedule
    wfmanager.getSchedule(scheduleName).suspend();
    // Check that after suspend it goes to "SUSPENDED" state
    waitForScheduleState(scheduleName, wfmanager, ProgramScheduleStatus.SUSPENDED);

    // Test workflow token while suspended
    String pid = history.get(0).getPid();
    WorkflowTokenDetail workflowToken = wfmanager.getToken(pid, WorkflowToken.Scope.SYSTEM, null);
    Assert.assertEquals(0, workflowToken.getTokenData().size());
    workflowToken = wfmanager.getToken(pid, null, null);
    Assert.assertEquals(2, workflowToken.getTokenData().size());

    // Wait for all workflow runs to finish execution, in case more than one run happened
    // with an enabled schedule
    waitForAllRunsCompleted(wfmanager);

    // Verify workflow token after workflow completion
    WorkflowTokenNodeDetail workflowTokenAtNode = wfmanager.getTokenAtNode(pid,
        AppWithSchedule.DummyAction.class.getSimpleName(), WorkflowToken.Scope.USER, "finished");
    Assert.assertEquals(true, Boolean.parseBoolean(workflowTokenAtNode.getTokenDataAtNode().get("finished")));
    workflowToken = wfmanager.getToken(pid, null, null);
    Assert.assertEquals(false, Boolean.parseBoolean(workflowToken.getTokenData().get("running").get(0).getValue()));
}

/**
 * Blocks (up to 15 seconds) until every run in the workflow's history has status
 * {@link ProgramRunStatus#COMPLETED}. Extracted because the original test duplicated
 * this anonymous-Callable poll verbatim in two places.
 */
private void waitForAllRunsCompleted(final WorkflowManager wfmanager) throws Exception {
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            for (RunRecord record : wfmanager.getHistory()) {
                if (record.getStatus() != ProgramRunStatus.COMPLETED) {
                    return false;
                }
            }
            return true;
        }
    }, 15, TimeUnit.SECONDS);
}
Example use of co.cask.cdap.test.ApplicationManager in project cdap by caskdata:
class TestFrameworkTestRun, method testApp.
// todo: passing stream name as a workaround for not cleaning up streams during reset()
private void testApp(Class<? extends Application> app, String streamName) throws Exception {
    // End-to-end word-count scenario: feed a stream, verify flow metrics, query the
    // service, and run both MapReduce jobs over the dataset and the raw stream.
    ApplicationManager applicationManager = deployApplication(app);
    FlowManager flowManager = applicationManager.getFlowManager("WordCountFlow").start();

    // Send some inputs to streams
    StreamManager streamManager = getStreamManager(streamName);
    for (int i = 0; i < 100; i++) {
        streamManager.send(ImmutableMap.of("title", "title " + i), "testing message " + i);
    }

    // Check the flowlet metrics: each of the 100 events yields 5 counted fields.
    RuntimeMetrics flowletMetrics = flowManager.getFlowletMetrics("CountByField");
    flowletMetrics.waitForProcessed(500, 10, TimeUnit.SECONDS);
    Assert.assertEquals(0L, flowletMetrics.getException());

    // Query the result
    ServiceManager serviceManager = applicationManager.getServiceManager("WordFrequency").start();
    serviceManager.waitForStatus(true, 2, 1);

    // Verify the query result: "testing" appears once in each of the 100 events.
    Type resultType = new TypeToken<Map<String, Long>>() {
    }.getType();
    Map<String, Long> result = new Gson().fromJson(
        callServiceGet(serviceManager.getServiceURL(), "wordfreq/" + streamName + ":testing"), resultType);
    Assert.assertNotNull(result);
    Assert.assertEquals(100L, result.get(streamName + ":testing").longValue());

    // check the metrics
    RuntimeMetrics serviceMetrics = serviceManager.getMetrics();
    serviceMetrics.waitForProcessed(1, 5, TimeUnit.SECONDS);
    Assert.assertEquals(0L, serviceMetrics.getException());

    Map<String, String> argsForMR =
        ImmutableMap.of("task.*." + SystemArguments.METRICS_CONTEXT_TASK_INCLUDED, "true");

    // Run mapreduce job
    MapReduceManager mrManager = applicationManager.getMapReduceManager("countTotal").start(argsForMR);
    mrManager.waitForRun(ProgramRunStatus.COMPLETED, 1800L, TimeUnit.SECONDS);
    testTaskTagLevelExists(WordCountApp.class.getSimpleName(), "countTotal",
        mrManager.getHistory().get(0).getPid(), "mydataset", true);

    // Long.parseLong avoids the boxing round-trip of Long.valueOf(...).longValue().
    long totalCount = Long.parseLong(callServiceGet(serviceManager.getServiceURL(), "total"));
    // every event has 5 tokens
    Assert.assertEquals(5 * 100L, totalCount);

    // Run mapreduce from stream
    mrManager = applicationManager.getMapReduceManager("countFromStream").start();
    mrManager.waitForRun(ProgramRunStatus.COMPLETED, 120L, TimeUnit.SECONDS);
    totalCount = Long.parseLong(callServiceGet(serviceManager.getServiceURL(), "stream_total"));
    // The stream MR only consume the body, not the header.
    Assert.assertEquals(3 * 100L, totalCount);

    DataSetManager<MyKeyValueTableDefinition.KeyValueTable> mydatasetManager = getDataset("mydataset");
    Assert.assertEquals(100L, Long.parseLong(mydatasetManager.get().get("title:title")));
    // also test the deprecated version of getDataset(). This can be removed when we remove the method
    // NOTE(review): this call is identical to the one above — the deprecated overload it was
    // meant to exercise appears to have been removed already; confirm and drop the duplicate.
    mydatasetManager = getDataset("mydataset");
    Assert.assertEquals(100L, Long.parseLong(mydatasetManager.get().get("title:title")));
}
Aggregations