Usage of co.cask.cdap.proto.id.ProgramId in project cdap by caskdata:
class RemoteRuntimeStoreTest, method testWorkflowMethods.
@Test
public void testWorkflowMethods() {
  // The store records start/stop timestamps in epoch seconds.
  long stopSecs = System.currentTimeMillis() / 1000;
  long startSecs = stopSecs - 20;

  ProgramId workflowId =
      new ProgramId(Id.Namespace.DEFAULT.getId(), "test_app", ProgramType.WORKFLOW, "test_workflow");
  String workflowPid = RunIds.generate(startSecs * 1000).getId();
  String twillRunId = "twill_run_id";

  Map<String, String> runtimeArgs = ImmutableMap.of();
  Map<String, String> systemArgs = ImmutableMap.of();
  Map<String, String> expectedProperties = ImmutableMap.of("runtimeArgs", GSON.toJson(runtimeArgs));
  RunRecordMeta expectedRunning = new RunRecordMeta(workflowPid, startSecs, null,
      ProgramRunStatus.RUNNING, expectedProperties, systemArgs, twillRunId);

  // Starting the workflow must produce a RUNNING run record.
  runtimeStore.setStart(workflowId, workflowPid, startSecs, twillRunId, runtimeArgs, systemArgs);
  Assert.assertEquals(expectedRunning, store.getRun(workflowId, workflowPid));

  // Start a MapReduce that identifies itself as a node of the running workflow.
  ProgramId mapreduceId = new ProgramId(workflowId.getNamespace(), workflowId.getApplication(),
      ProgramType.MAPREDUCE, "test_mr");
  String mapreducePid = RunIds.generate(startSecs * 1000).getId();
  // These system properties just have to be set on the system arguments of the program,
  // in order for it to be understood as a program in a workflow node.
  Map<String, String> mrSystemArgs = ImmutableMap.of(
      ProgramOptionConstants.WORKFLOW_NODE_ID, "test_node_id",
      ProgramOptionConstants.WORKFLOW_NAME, workflowId.getProgram(),
      ProgramOptionConstants.WORKFLOW_RUN_ID, workflowPid);
  runtimeStore.setStart(mapreduceId, mapreducePid, startSecs, twillRunId, runtimeArgs, mrSystemArgs);

  // Fail the inner program with a nested cause, then fail the workflow itself.
  BasicThrowable failureCause =
      new BasicThrowable(new IllegalArgumentException("failure", new RuntimeException("oops")));
  runtimeStore.setStop(mapreduceId, mapreducePid, stopSecs, ProgramRunStatus.FAILED, failureCause);
  runtimeStore.setStop(workflowId, workflowPid, stopSecs, ProgramRunStatus.FAILED);

  RunRecordMeta completed = store.getRun(workflowId, workflowPid);
  // we're not comparing properties, since runtime (such as starting/stopping inner programs) modifies it
  Assert.assertEquals(workflowPid, completed.getPid());
  Assert.assertEquals(expectedRunning.getStartTs(), completed.getStartTs());
  Assert.assertEquals((Long) stopSecs, completed.getStopTs());
  Assert.assertEquals(ProgramRunStatus.FAILED, completed.getStatus());
  Assert.assertEquals(twillRunId, completed.getTwillRunId());
  Assert.assertEquals(systemArgs, completed.getSystemArgs());

  // test that the BasicThrowable was serialized properly by RemoteRuntimeStore
  ProgramRunId workflowRunId = workflowId.run(workflowPid);
  List<WorkflowNodeStateDetail> nodeStates = store.getWorkflowNodeStates(workflowRunId);
  Assert.assertEquals(1, nodeStates.size());
  WorkflowNodeStateDetail nodeState = nodeStates.get(0);
  Assert.assertEquals("test_node_id", nodeState.getNodeId());
  Assert.assertEquals(mapreducePid, nodeState.getRunId());
  Assert.assertEquals(NodeStatus.FAILED, nodeState.getNodeStatus());
  Assert.assertEquals(failureCause, nodeState.getFailureCause());
}
Usage of co.cask.cdap.proto.id.ProgramId in project cdap by caskdata:
class RemoteRuntimeStoreTest, method testSimpleCase.
@Test
public void testSimpleCase() {
  ProgramId flowId = new ProgramId(Id.Namespace.DEFAULT.getId(), "test_app", ProgramType.FLOW, "test_flow");
  // Timestamps are in epoch seconds: divide millis by 1000. (Previously this divided by 2000,
  // yielding a timestamp roughly half the current epoch — inconsistent with testWorkflowMethods
  // and with RunIds.generate(startTime * 1000) below, which reconstructs millis from seconds.)
  long stopTime = System.currentTimeMillis() / 1000;
  long startTime = stopTime - 20;
  String pid = RunIds.generate(startTime * 1000).getId();
  // to test null serialization (setStart can take in nullable)
  String twillRunId = null;
  Map<String, String> runtimeArgs = ImmutableMap.of();
  Map<String, String> properties = ImmutableMap.of("runtimeArgs", GSON.toJson(runtimeArgs));
  Map<String, String> systemArgs = ImmutableMap.of("a", "b");
  RunRecordMeta initialRunRecord = new RunRecordMeta(pid, startTime, null, ProgramRunStatus.RUNNING,
      properties, systemArgs, twillRunId);

  // start -> RUNNING record must round-trip through the remote store
  runtimeStore.setStart(flowId, pid, startTime, twillRunId, runtimeArgs, systemArgs);
  RunRecordMeta runMeta = store.getRun(flowId, pid);
  Assert.assertEquals(initialRunRecord, runMeta);

  // suspend then resume: status toggles SUSPENDED -> RUNNING
  runtimeStore.setSuspend(flowId, pid);
  Assert.assertEquals(new RunRecordMeta(initialRunRecord, null, ProgramRunStatus.SUSPENDED),
      store.getRun(flowId, pid));
  runtimeStore.setResume(flowId, pid);
  Assert.assertEquals(initialRunRecord, store.getRun(flowId, pid));

  // stop -> COMPLETED record with the stop timestamp set
  runtimeStore.setStop(flowId, pid, stopTime, ProgramRunStatus.COMPLETED);
  RunRecordMeta runRecordMeta = store.getRun(flowId, pid);
  RunRecordMeta finalRunRecord = new RunRecordMeta(initialRunRecord, stopTime, ProgramRunStatus.COMPLETED);
  Assert.assertEquals(finalRunRecord, runRecordMeta);
}
Usage of co.cask.cdap.proto.id.ProgramId in project cdap by caskdata:
class WorkflowHttpHandlerTest, method testStreamSizeSchedules.
// Integration test: a workflow is triggered by stream-size schedules; verifies that runs occur
// after data ingestion, stop while the schedule is suspended, and resume afterwards.
// NOTE(review): this test is timing-sensitive (sleeps and polling windows) — statement order matters.
@Test
public void testStreamSizeSchedules() throws Exception {
// Steps for the test:
// 1. Deploy the app
// 2. Verify the schedules
// 3. Ingest data in the stream
// 4. Verify the history after waiting a while
// 5. Suspend the schedule
// 6. Ingest data in the stream
// 7. Verify there are no runs after the suspend by looking at the history
// 8. Resume the schedule
// 9. Verify there are runs after the resume by looking at the history
String appName = "AppWithStreamSizeSchedule";
String sampleSchedule1 = "SampleSchedule1";
String sampleSchedule2 = "SampleSchedule2";
String workflowName = "SampleWorkflow";
String streamName = "stream";
Id.Program programId = Id.Program.from(TEST_NAMESPACE2, appName, ProgramType.WORKFLOW, workflowName);
// Build a ~100KB payload (10,000 x 10 chars) to post repeatedly; 12 posts exceed the 1MB threshold.
StringBuilder longStringBuilder = new StringBuilder();
for (int i = 0; i < 10000; i++) {
longStringBuilder.append("dddddddddd");
}
String longString = longStringBuilder.toString();
// deploy app with schedule in namespace 2
HttpResponse response = deploy(AppWithStreamSizeSchedule.class, Constants.Gateway.API_VERSION_3_TOKEN, TEST_NAMESPACE2);
Assert.assertEquals(200, response.getStatusLine().getStatusCode());
// Schedules deploy in a suspended state; resume both so they can fire.
Assert.assertEquals(200, resumeSchedule(TEST_NAMESPACE2, appName, sampleSchedule1));
Assert.assertEquals(200, resumeSchedule(TEST_NAMESPACE2, appName, sampleSchedule2));
// get schedules
List<ScheduleDetail> schedules = getSchedules(TEST_NAMESPACE2, appName, workflowName);
Assert.assertEquals(2, schedules.size());
String scheduleName1 = schedules.get(0).getName();
String scheduleName2 = schedules.get(1).getName();
Assert.assertNotNull(scheduleName1);
Assert.assertFalse(scheduleName1.isEmpty());
// Change notification threshold for stream
// NOTE(review): payload uses single-quoted JSON; presumably the server's parser is lenient — confirm.
response = doPut(String.format("/v3/namespaces/%s/streams/%s/properties", TEST_NAMESPACE2, streamName), "{'notification.threshold.mb': 1}");
Assert.assertEquals(200, response.getStatusLine().getStatusCode());
// Read the stream properties back and verify the threshold took effect.
response = doGet(String.format("/v3/namespaces/%s/streams/%s", TEST_NAMESPACE2, streamName));
String json = EntityUtils.toString(response.getEntity());
StreamProperties properties = new Gson().fromJson(json, StreamProperties.class);
Assert.assertEquals(1, properties.getNotificationThresholdMB().intValue());
// Ingest over 1MB of data in stream
for (int i = 0; i < 12; ++i) {
response = doPost(String.format("/v3/namespaces/%s/streams/%s", TEST_NAMESPACE2, streamName), longString);
Assert.assertEquals(200, response.getStatusLine().getStatusCode());
}
// Only schedule 1 should get executed
verifyProgramRuns(programId, "completed");
//Check schedule status
assertSchedule(programId, scheduleName1, true, 30, TimeUnit.SECONDS);
assertSchedule(programId, scheduleName2, true, 30, TimeUnit.SECONDS);
// Suspend both schedules; subsequent ingestion must not trigger new runs.
Assert.assertEquals(200, suspendSchedule(TEST_NAMESPACE2, appName, scheduleName1));
Assert.assertEquals(200, suspendSchedule(TEST_NAMESPACE2, appName, scheduleName2));
//check paused state
assertSchedule(programId, scheduleName1, false, 30, TimeUnit.SECONDS);
assertSchedule(programId, scheduleName2, false, 30, TimeUnit.SECONDS);
int workflowRuns = getProgramRuns(programId, "completed").size();
// Should still be one
Assert.assertEquals(1, workflowRuns);
// Sleep for some time and verify there are no more scheduled jobs after the suspend.
for (int i = 0; i < 12; ++i) {
response = doPost(String.format("/v3/namespaces/%s/streams/%s", TEST_NAMESPACE2, streamName), longString);
Assert.assertEquals(200, response.getStatusLine().getStatusCode());
}
TimeUnit.SECONDS.sleep(5);
int workflowRunsAfterSuspend = getProgramRuns(programId, "completed").size();
Assert.assertEquals(workflowRuns, workflowRunsAfterSuspend);
// Resume schedule 1; the already-ingested data should trigger exactly one more run.
Assert.assertEquals(200, resumeSchedule(TEST_NAMESPACE2, appName, scheduleName1));
//check scheduled state
assertSchedule(programId, scheduleName1, true, 30, TimeUnit.SECONDS);
// an additional run should execute and complete after resuming the schedule
assertRunHistory(programId, "completed", 1 + workflowRunsAfterSuspend, 60, TimeUnit.SECONDS);
//Check status of a non existing schedule
try {
assertSchedule(programId, "invalid", true, 2, TimeUnit.SECONDS);
Assert.fail();
} catch (Exception e) {
// expected
}
Assert.assertEquals(200, suspendSchedule(TEST_NAMESPACE2, appName, scheduleName1));
//check paused state
assertSchedule(programId, scheduleName1, false, 30, TimeUnit.SECONDS);
//Schedule operations using invalid namespace
try {
assertSchedule(Id.Program.from(TEST_NAMESPACE1, appName, ProgramType.WORKFLOW, workflowName), scheduleName1, true, 2, TimeUnit.SECONDS);
Assert.fail();
} catch (Exception e) {
// expected
}
// Schedule operations against the wrong namespace must 404, not silently succeed.
Assert.assertEquals(404, suspendSchedule(TEST_NAMESPACE1, appName, scheduleName1));
Assert.assertEquals(404, resumeSchedule(TEST_NAMESPACE1, appName, scheduleName1));
// Wait until any running jobs just before suspend call completes.
TimeUnit.SECONDS.sleep(2);
}
Usage of co.cask.cdap.proto.id.ProgramId in project cdap by caskdata:
class MetadataHttpHandler, method addProgramTags.
/**
 * Adds tags (read from the request body) to the metadata of the addressed program.
 * Responds with 200 OK and a confirmation message on success.
 */
@POST
@Path("/namespaces/{namespace-id}/apps/{app-id}/{program-type}/{program-id}/metadata/tags")
@AuditPolicy(AuditDetail.REQUEST_BODY)
public void addProgramTags(HttpRequest request, HttpResponder responder, @PathParam("namespace-id") String namespaceId, @PathParam("app-id") String appId, @PathParam("program-type") String programType, @PathParam("program-id") String programId) throws BadRequestException, NotFoundException {
  // Resolve the program-type path segment (a category name) before building the entity id.
  ProgramType type = ProgramType.valueOfCategoryName(programType);
  ProgramId program = new ProgramId(namespaceId, appId, type, programId);
  metadataAdmin.addTags(program, readArray(request));
  responder.sendString(HttpResponseStatus.OK, String.format("Added tags to program %s successfully.", program));
}
Usage of co.cask.cdap.proto.id.ProgramId in project cdap by caskdata:
class MetadataHttpHandler, method addProgramProperties.
/**
 * Adds metadata properties (parsed from the request body) to the addressed program.
 * Responds with 200 OK and a confirmation message on success.
 */
@POST
@Path("/namespaces/{namespace-id}/apps/{app-id}/{program-type}/{program-id}/metadata/properties")
@AuditPolicy(AuditDetail.REQUEST_BODY)
public void addProgramProperties(HttpRequest request, HttpResponder responder, @PathParam("namespace-id") String namespaceId, @PathParam("app-id") String appId, @PathParam("program-type") String programType, @PathParam("program-id") String programId) throws BadRequestException, NotFoundException {
  // Resolve the program-type path segment (a category name) before building the entity id.
  ProgramType type = ProgramType.valueOfCategoryName(programType);
  ProgramId program = new ProgramId(namespaceId, appId, type, programId);
  metadataAdmin.addProperties(program, readMetadata(request));
  responder.sendString(HttpResponseStatus.OK, "Metadata added successfully to " + program);
}
Aggregations