use of io.cdap.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.
the class ProgramLifecycleHttpHandlerTest method testAddSchedule.
private void testAddSchedule(String scheduleName) throws Exception {
  String partitionScheduleName = scheduleName + "Partition";
  String orScheduleName = scheduleName + "Or";
  ProtoTrigger.TimeTrigger protoTime = new ProtoTrigger.TimeTrigger("0 * * * ?");
  ProtoTrigger.PartitionTrigger protoPartition = new ProtoTrigger.PartitionTrigger(NamespaceId.DEFAULT.dataset("data"), 5);
  ProtoTrigger.OrTrigger protoOr = ProtoTrigger.or(protoTime, protoPartition);
  String description = "Something";
  ScheduleProgramInfo programInfo = new ScheduleProgramInfo(SchedulableProgramType.WORKFLOW, AppWithSchedule.WORKFLOW_NAME);
  ImmutableMap<String, String> properties = ImmutableMap.of("a", "b", "c", "d");
  TimeTrigger timeTrigger = new TimeTrigger("0 * * * ?");
  ScheduleDetail timeDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, scheduleName, description, programInfo, properties, timeTrigger, Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  PartitionTrigger partitionTrigger = new PartitionTrigger(protoPartition.getDataset(), protoPartition.getNumPartitions());
  ScheduleDetail expectedPartitionDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, partitionScheduleName, description, programInfo, properties, partitionTrigger, Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  ScheduleDetail requestPartitionDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, partitionScheduleName, description, programInfo, properties, protoPartition, Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  ScheduleDetail expectedOrDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, orScheduleName, description, programInfo, properties, new OrTrigger(timeTrigger, partitionTrigger), Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  ScheduleDetail requestOrDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, orScheduleName, description, programInfo, properties, protoOr, Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  // trying to add a schedule whose name in the path param differs from the name in the schedule spec should fail
  HttpResponse response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, "differentName", timeDetail);
  Assert.assertEquals(HttpResponseStatus.BAD_REQUEST.code(), response.getResponseCode());
  // adding a schedule to a non-existing app should fail
  response = addSchedule(TEST_NAMESPACE1, "nonExistingApp", null, scheduleName, timeDetail);
  Assert.assertEquals(HttpResponseStatus.NOT_FOUND.code(), response.getResponseCode());
  // adding a schedule for an invalid program type should fail
  ScheduleDetail invalidScheduleDetail = new ScheduleDetail(scheduleName, "Something", new ScheduleProgramInfo(SchedulableProgramType.MAPREDUCE, AppWithSchedule.MAPREDUCE), properties, protoTime, Collections.emptyList(), TimeUnit.MINUTES.toMillis(1));
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, scheduleName, invalidScheduleDetail);
  Assert.assertEquals(HttpResponseStatus.BAD_REQUEST.code(), response.getResponseCode());
  // adding a schedule for a program that does not exist should fail
  ScheduleDetail nonExistingDetail = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, ApplicationId.DEFAULT_VERSION, scheduleName, description, new ScheduleProgramInfo(SchedulableProgramType.MAPREDUCE, "nope"), properties, timeTrigger, Collections.emptyList(), Schedulers.JOB_QUEUE_TIMEOUT_MILLIS, null, null);
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, scheduleName, nonExistingDetail);
  Assert.assertEquals(HttpResponseStatus.NOT_FOUND.code(), response.getResponseCode());
  // test adding a schedule
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, scheduleName, timeDetail);
  Assert.assertEquals(HttpResponseStatus.OK.code(), response.getResponseCode());
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, partitionScheduleName, requestPartitionDetail);
  Assert.assertEquals(HttpResponseStatus.OK.code(), response.getResponseCode());
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, orScheduleName, requestOrDetail);
  Assert.assertEquals(HttpResponseStatus.OK.code(), response.getResponseCode());
  List<ScheduleDetail> schedules = getSchedules(TEST_NAMESPACE1, AppWithSchedule.NAME, AppWithSchedule.WORKFLOW_NAME);
  Assert.assertEquals(4, schedules.size());
  Assert.assertEquals(timeDetail, schedules.get(1));
  Assert.assertEquals(expectedOrDetail, schedules.get(2));
  Assert.assertEquals(expectedPartitionDetail, schedules.get(3));
  List<ScheduleDetail> schedulesForApp = listSchedules(TEST_NAMESPACE1, AppWithSchedule.NAME, null);
  Assert.assertEquals(schedules, schedulesForApp);
  // trying to add the ScheduleDetail of the same schedule again should fail with a conflict (AlreadyExistsException)
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, null, scheduleName, timeDetail);
  Assert.assertEquals(HttpResponseStatus.CONFLICT.code(), response.getResponseCode());
  // however, we should be able to add the schedule to a different version of the app
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2, scheduleName, timeDetail);
  Assert.assertEquals(HttpResponseStatus.OK.code(), response.getResponseCode());
  // this should not have affected the schedules of the default version
  List<ScheduleDetail> scheds = getSchedules(TEST_NAMESPACE1, AppWithSchedule.NAME, AppWithSchedule.WORKFLOW_NAME);
  Assert.assertEquals(schedules, scheds);
  // there should be two schedules now for version 2
  List<ScheduleDetail> schedules2 = getSchedules(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2, AppWithSchedule.WORKFLOW_NAME);
  Assert.assertEquals(2, schedules2.size());
  Assert.assertEquals(timeDetail, schedules2.get(1));
  List<ScheduleDetail> schedulesForApp2 = listSchedules(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2);
  Assert.assertEquals(schedules2, schedulesForApp2);
  // add a schedule whose spec has no schedule name; the name comes from the path param
  ScheduleDetail detail2 = new ScheduleDetail(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2, null, "Something 2", programInfo, properties, new TimeTrigger("0 * * * ?"), Collections.emptyList(), TimeUnit.HOURS.toMillis(6), null, null);
  response = addSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2, "schedule-100", detail2);
  Assert.assertEquals(HttpResponseStatus.OK.code(), response.getResponseCode());
  ScheduleDetail detail100 = getSchedule(TEST_NAMESPACE1, AppWithSchedule.NAME, VERSION2, "schedule-100");
  Assert.assertEquals("schedule-100", detail100.getName());
  Assert.assertEquals(detail2.getTimeoutMillis(), detail100.getTimeoutMillis());
}
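The partition-schedule assertions above hinge on one pairing: the REST request carries a ProtoTrigger.PartitionTrigger, while the schedule the handler stores and later returns holds the internal PartitionTrigger built from the same dataset and partition count. A minimal sketch of that pairing, reusing the constructors from the test (the dataset name and partition count are simply the test's values):

ProtoTrigger.PartitionTrigger requestTrigger =
    new ProtoTrigger.PartitionTrigger(NamespaceId.DEFAULT.dataset("data"), 5);
// the trigger the handler is expected to persist for a request carrying requestTrigger
PartitionTrigger storedTrigger =
    new PartitionTrigger(requestTrigger.getDataset(), requestTrigger.getNumPartitions());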
use of io.cdap.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.
the class ConcurrencyConstraintTest method testMaxConcurrentRuns.
@Test
public void testMaxConcurrentRuns() {
  Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
  try {
    long now = System.currentTimeMillis();
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
    SimpleJob job = new SimpleJob(schedule, 0, now, Collections.emptyList(), Job.State.PENDING_TRIGGER, 0L);
    ConcurrencyConstraint concurrencyConstraint = new ConcurrencyConstraint(2);
    ConstraintContext constraintContext = new ConstraintContext(job, now, store);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    ProgramRunId pid1 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid2 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid3 = WORKFLOW_ID.run(RunIds.generate().getId());
    // add a run for the schedule
    Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
    setStartAndRunning(store, pid1, EMPTY_MAP, systemArgs);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program from a different schedule. Since there are now 2 running instances of the
    // workflow (regardless of the schedule name), the constraint is not met
    systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, "not" + schedule.getName());
    setStartAndRunning(store, pid2, EMPTY_MAP, systemArgs);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program that wasn't started by a schedule;
    // there are now three concurrent runs, so the constraint will not be met
    setStartAndRunning(store, pid3);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // stop the first program; the constraint will not be satisfied as there are still 2 running
    store.setStop(pid1, System.currentTimeMillis(), ProgramRunStatus.COMPLETED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // suspending/resuming the workflow doesn't reduce its concurrency count
    store.setSuspend(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    store.setResume(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // but the constraint will be satisfied once it completes, as only 1 run remains RUNNING
    store.setStop(pid3, System.currentTimeMillis(), ProgramRunStatus.KILLED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // add a run in provisioning state; the constraint will not be satisfied since active runs increased to 2
    ProgramRunId pid4 = WORKFLOW_ID.run(RunIds.generate().getId());
    setProvisioning(store, pid4, Collections.emptyMap(), Collections.emptyMap());
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // stop the provisioning run; the constraint will be satisfied since active runs decreased to 1
    store.setStop(pid4, System.currentTimeMillis(), ProgramRunStatus.FAILED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // stopping the last running workflow will also satisfy the constraint
    store.setStop(pid2, System.currentTimeMillis(), ProgramRunStatus.FAILED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
  } finally {
    AppFabricTestHelper.shutdown();
  }
}
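Note that the test builds the schedule with an empty constraint list and invokes ConcurrencyConstraint.check directly through a ConstraintContext. Outside a test, the constraint would normally be declared on the schedule itself; a sketch under the assumption that a ConcurrencyConstraint can be supplied in the schedule's constraint list, reusing the constructors from the snippet:

// at most 2 concurrent runs of the workflow, triggered by one new partition in DATASET_ID
ProgramSchedule constrainedSchedule = new ProgramSchedule(
    "SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"),
    new PartitionTrigger(DATASET_ID, 1), ImmutableList.of(new ConcurrencyConstraint(2)));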
use of io.cdap.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.
the class JobQueueTableTest method testJobTimeout.
@Test
public void testJobTimeout() {
  TransactionRunners.run(transactionRunner, context -> {
    JobQueueTable jobQueue = JobQueueTable.getJobQueue(context, getCConf());
    // there should be 0 jobs in the JobQueue to begin with
    Assert.assertEquals(0, getAllJobs(jobQueue, false).size());
    // construct a partition notification with DATASET_ID
    Notification notification = Notification.forPartitions(DATASET_ID, ImmutableList.of());
    Assert.assertNull(jobQueue.getJob(SCHED1_JOB.getJobKey()));
    ProgramSchedule scheduleWithTimeout = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
    Job jobWithTimeout = new SimpleJob(scheduleWithTimeout, 0, System.currentTimeMillis() - (Schedulers.JOB_QUEUE_TIMEOUT_MILLIS + 1), Lists.newArrayList(), Job.State.PENDING_TRIGGER, 0L);
    jobQueue.put(jobWithTimeout);
    Assert.assertEquals(jobWithTimeout, jobQueue.getJob(jobWithTimeout.getJobKey()));
    // before adding the notification, there should just be the job we added
    Assert.assertEquals(1, toSet(jobQueue.getJobsForSchedule(scheduleWithTimeout.getScheduleId()), true).size());
    // adding a notification will ignore the existing job (because it is timed out). It will create a new job
    // and add the notification to that new job
    jobQueue.addNotification(new ProgramScheduleRecord(SCHED1, new ProgramScheduleMeta(ProgramScheduleStatus.SCHEDULED, 0L)), notification);
    List<Job> jobs = new ArrayList<>(toSet(jobQueue.getJobsForSchedule(scheduleWithTimeout.getScheduleId()), true));
    // sort by creation time (the oldest will be first in the list)
    Collections.sort(jobs, (o1, o2) -> Long.valueOf(o1.getCreationTime()).compareTo(o2.getCreationTime()));
    Assert.assertEquals(2, jobs.size());
    Job firstJob = jobs.get(0);
    // the first job should have the same creation timestamp as the initially added job
    Assert.assertEquals(jobWithTimeout.getCreationTime(), firstJob.getCreationTime());
    // the notification we added shouldn't be in the first job
    Assert.assertEquals(0, firstJob.getNotifications().size());
    // the first job should be marked to be deleted because it timed out
    Assert.assertTrue(firstJob.isToBeDeleted());
    // the first job should have the same generation id as the timed-out job
    Assert.assertEquals(jobWithTimeout.getGenerationId(), firstJob.getGenerationId());
    Job secondJob = jobs.get(1);
    // the second job should not have the same creation timestamp as the initially added job
    Assert.assertNotEquals(jobWithTimeout.getCreationTime(), secondJob.getCreationTime());
    // the notification we added should be in the second job
    Assert.assertEquals(1, secondJob.getNotifications().size());
    Assert.assertEquals(notification, secondJob.getNotifications().get(0));
    // the second job should not be marked to be deleted, since it was just created by our call to
    // JobQueue#addNotification
    Assert.assertFalse(secondJob.isToBeDeleted());
    // the second job should have the next generation id
    Assert.assertEquals(jobWithTimeout.getGenerationId() + 1, secondJob.getGenerationId());
  });
}
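The behavior exercised here depends on the notification's dataset matching the dataset of the schedule's PartitionTrigger; only then does addNotification route the notification to that schedule's jobs (and, because the existing job has timed out, to a freshly created job with the next generation id). A minimal sketch of that pairing, reusing the fixtures from the test:

// the trigger and the notification refer to the same dataset, so the notification reaches SCHED1's jobs
PartitionTrigger partitionTrigger = new PartitionTrigger(DATASET_ID, 1);
Notification partitionNotification = Notification.forPartitions(DATASET_ID, ImmutableList.of());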
use of io.cdap.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.
the class JobQueueTableTest method testJobQueueIteration.
@Test
public void testJobQueueIteration() {
  // Test that getJobs can be leveraged to iterate over the JobQueue partially (and across transactions).
  // The Job last consumed from the Iterator returned in the first iteration can be passed to the next call to resume
  // scanning from.
  TransactionRunners.run(transactionRunner, context -> {
    JobQueueTable jobQueue = JobQueueTable.getJobQueue(context, getCConf());
    // should be 0 jobs in the JobQueue to begin with
    Assert.assertEquals(0, getAllJobs(jobQueue).size());
    Multimap<Integer, Job> jobsByPartition = HashMultimap.create();
    long now = 1494353984967L;
    for (int i = 0; i < 100; i++) {
      ProgramSchedule schedule = new ProgramSchedule("sched" + i, "one partition schedule", WORKFLOW_ID, ImmutableMap.of(), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
      Job job = new SimpleJob(schedule, i, now + i, ImmutableList.of(), Job.State.PENDING_TRIGGER, 0L);
      jobsByPartition.put(jobQueue.getPartition(schedule.getScheduleId()), job);
      jobQueue.put(job);
    }
    // in partition 0, there should be at least two Jobs for us to even test resumption of job queue iteration
    Set<Job> partitionZeroJobs = new HashSet<>(jobsByPartition.get(0));
    Assert.assertTrue(partitionZeroJobs.size() > 1);
    // sanity check that all 100 jobs are in the JobQueue
    Assert.assertEquals(jobsByPartition.size(), getAllJobs(jobQueue).size());
    Assert.assertEquals(partitionZeroJobs, toSet(jobQueue.getJobs(0, null)));
    // consume just 1 job in the first partition. Then, use it to specify the exclusive starting point in the
    // next call to getJobs
    Job firstConsumedJob;
    try (CloseableIterator<Job> partitionZeroJobsIter = jobQueue.getJobs(0, null)) {
      Assert.assertTrue(partitionZeroJobsIter.hasNext());
      firstConsumedJob = partitionZeroJobsIter.next();
    }
    // the Jobs consumed in the second iteration should be all except the Job consumed in the first iteration
    Set<Job> consumedInSecondIteration = toSet(jobQueue.getJobs(0, firstConsumedJob));
    Assert.assertEquals(partitionZeroJobs.size() - 1, consumedInSecondIteration.size());
    Set<Job> consumedJobs = new HashSet<>(consumedInSecondIteration);
    consumedJobs.add(firstConsumedJob);
    Assert.assertEquals(partitionZeroJobs, consumedJobs);
  });
}
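The contract being tested: getJobs(partition, null) scans a partition from the beginning, and passing the last Job consumed resumes the scan exclusively after that job, which is what allows the iteration to be split across transactions. A short sketch of that resumption pattern, assuming the same JobQueueTable API and the test's toSet helper:

Job lastConsumed;
try (CloseableIterator<Job> iter = jobQueue.getJobs(0, null)) {
  // consume only the first job, e.g. because the current transaction is about to end
  lastConsumed = iter.next();
}
// a later call, possibly in a new transaction, picks up right after the last consumed job
Set<Job> remaining = toSet(jobQueue.getJobs(0, lastConsumed));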
use of io.cdap.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.
the class CoreSchedulerServiceTest method addListDeleteSchedules.
@Test
public void addListDeleteSchedules() throws Exception {
  // verify that list returns nothing
  Assert.assertTrue(scheduler.listSchedules(APP1_ID).isEmpty());
  Assert.assertTrue(scheduler.listSchedules(PROG1_ID).isEmpty());
  // add a schedule for app1
  ProgramSchedule tsched1 = new ProgramSchedule("tsched1", "one time schedule", PROG1_ID, ImmutableMap.of("prop1", "nn"), new TimeTrigger("* * ? * 1"), ImmutableList.<Constraint>of());
  scheduler.addSchedule(tsched1);
  Assert.assertEquals(tsched1, scheduler.getSchedule(TSCHED1_ID));
  Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(PROG1_ID));
  // add three more schedules, one for the same program, one for the same app, one for another app
  ProgramSchedule psched1 = new ProgramSchedule("psched1", "one partition schedule", PROG1_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), Collections.emptyList());
  ProgramSchedule tsched11 = new ProgramSchedule("tsched11", "two times schedule", PROG11_ID, ImmutableMap.of("prop2", "xx"), new TimeTrigger("* * ? * 1,2"), Collections.emptyList());
  ProgramSchedule psched2 = new ProgramSchedule("psched2", "two partition schedule", PROG2_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), Collections.emptyList());
  scheduler.addSchedules(ImmutableList.of(psched1, tsched11, psched2));
  Assert.assertEquals(psched1, scheduler.getSchedule(PSCHED1_ID));
  Assert.assertEquals(tsched11, scheduler.getSchedule(TSCHED11_ID));
  Assert.assertEquals(psched2, scheduler.getSchedule(PSCHED2_ID));
  // list by app and program
  Assert.assertEquals(ImmutableList.of(psched1, tsched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // delete one schedule
  scheduler.deleteSchedule(TSCHED1_ID);
  verifyNotFound(scheduler, TSCHED1_ID);
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // attempt to delete it again along with another one that exists
  try {
    scheduler.deleteSchedules(ImmutableList.of(TSCHED1_ID, TSCHED11_ID));
    Assert.fail("expected NotFoundException");
  } catch (NotFoundException e) {
    // expected
  }
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // attempt to add it back together with a schedule that exists
  try {
    scheduler.addSchedules(ImmutableList.of(tsched1, tsched11));
    Assert.fail("expected AlreadyExistsException");
  } catch (AlreadyExistsException e) {
    // expected
  }
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // add it back, delete all schedules for one app
  scheduler.addSchedule(tsched1);
  scheduler.deleteSchedules(APP1_ID);
  verifyNotFound(scheduler, TSCHED1_ID);
  verifyNotFound(scheduler, PSCHED1_ID);
  verifyNotFound(scheduler, TSCHED11_ID);
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
}
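Stripped down to the PartitionTrigger-specific part, the lifecycle the test walks through is: build a schedule whose trigger is a PartitionTrigger, add it, look it up by its schedule id, and delete it. A minimal sketch, assuming the same scheduler API and the DS1_ID/PROG1_ID/PSCHED1_ID fixtures used above:

// fires once a new partition arrives in DS1_ID
ProgramSchedule partitionSchedule = new ProgramSchedule("psched1", "one partition schedule", PROG1_ID,
    ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), Collections.emptyList());
scheduler.addSchedule(partitionSchedule);
Assert.assertEquals(partitionSchedule, scheduler.getSchedule(PSCHED1_ID));
scheduler.deleteSchedule(PSCHED1_ID);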