use of co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class CoreSchedulerServiceTest method testScheduleUpdate.
private void testScheduleUpdate(String howToUpdate) throws Exception {
  int runs = getRuns(WORKFLOW_2);
  ScheduleId scheduleId2 = APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  // send one notification to the schedule; on its own it will not start the workflow
  publishNotification(dataEventTopic, WORKFLOW_2, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // give it enough time to create the job
  TimeUnit.SECONDS.sleep(5);
  Assert.assertEquals(runs, getRuns(WORKFLOW_2));
  if ("disable".equals(howToUpdate)) {
    // disabling and enabling the schedule should remove the job
    disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
    enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  } else {
    ProgramSchedule schedule = scheduler.getSchedule(scheduleId2);
    Map<String, String> updatedProperties = ImmutableMap.<String, String>builder()
      .putAll(schedule.getProperties()).put(howToUpdate, howToUpdate).build();
    ProgramSchedule updatedSchedule = new ProgramSchedule(schedule.getName(), schedule.getDescription(),
                                                          schedule.getProgramId(), updatedProperties,
                                                          schedule.getTrigger(), schedule.getConstraints());
    if ("update".equals(howToUpdate)) {
      scheduler.updateSchedule(updatedSchedule);
      Assert.assertEquals(ProgramScheduleStatus.SCHEDULED, scheduler.getScheduleStatus(scheduleId2));
    } else if ("delete".equals(howToUpdate)) {
      scheduler.deleteSchedule(scheduleId2);
      scheduler.addSchedule(updatedSchedule);
      enableSchedule(scheduleId2.getSchedule());
    } else {
      Assert.fail("invalid howToUpdate: " + howToUpdate);
    }
  }
  // a single notification should not trigger workflow 2 yet (if it does, the old job was not removed)
  publishNotification(dataEventTopic, WORKFLOW_2, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // give it enough time to create the job; the workflow should still not start
  TimeUnit.SECONDS.sleep(10);
  Assert.assertEquals(runs, getRuns(WORKFLOW_2));
  // now this second notification should kick off the workflow
  publishNotification(dataEventTopic, WORKFLOW_2, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  waitForCompleteRuns(runs + 1, WORKFLOW_2);
}
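The helpers enableSchedule and disableSchedule are defined elsewhere in CoreSchedulerServiceTest and are not part of this excerpt. A minimal sketch of what they could look like, assuming the Scheduler interface exposes enableSchedule, disableSchedule, and getScheduleStatus as used above; the status assertions are illustrative, not the test's verbatim implementation:

// Plausible sketch only; the actual helpers in CoreSchedulerServiceTest may differ.
private void enableSchedule(String scheduleName) throws Exception {
  ScheduleId scheduleId = APP_ID.schedule(scheduleName);
  scheduler.enableSchedule(scheduleId);
  Assert.assertEquals(ProgramScheduleStatus.SCHEDULED, scheduler.getScheduleStatus(scheduleId));
}

private void disableSchedule(String scheduleName) throws Exception {
  ScheduleId scheduleId = APP_ID.schedule(scheduleName);
  scheduler.disableSchedule(scheduleId);
  Assert.assertEquals(ProgramScheduleStatus.SUSPENDED, scheduler.getScheduleStatus(scheduleId));
}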
use of co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class CoreSchedulerServiceTest method addListDeleteSchedules.
@Test
public void addListDeleteSchedules() throws Exception {
  // verify that list returns nothing
  Assert.assertTrue(scheduler.listSchedules(APP1_ID).isEmpty());
  Assert.assertTrue(scheduler.listSchedules(PROG1_ID).isEmpty());
  // add a schedule for app1
  ProgramSchedule tsched1 = new ProgramSchedule("tsched1", "one time schedule", PROG1_ID,
                                                ImmutableMap.of("prop1", "nn"),
                                                new TimeTrigger("* * ? * 1"), ImmutableList.<Constraint>of());
  scheduler.addSchedule(tsched1);
  Assert.assertEquals(tsched1, scheduler.getSchedule(TSCHED1_ID));
  Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(PROG1_ID));
  // add three more schedules: one for the same program, one for the same app, one for another app
  ProgramSchedule psched1 = new ProgramSchedule("psched1", "one partition schedule", PROG1_ID,
                                                ImmutableMap.of("prop3", "abc"),
                                                new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
  ProgramSchedule tsched11 = new ProgramSchedule("tsched11", "two times schedule", PROG11_ID,
                                                 ImmutableMap.of("prop2", "xx"),
                                                 new TimeTrigger("* * ? * 1,2"), ImmutableList.<Constraint>of());
  ProgramSchedule psched2 = new ProgramSchedule("psched2", "two partition schedule", PROG2_ID,
                                                ImmutableMap.of("propper", "popper"),
                                                new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
  scheduler.addSchedules(ImmutableList.of(psched1, tsched11, psched2));
  Assert.assertEquals(psched1, scheduler.getSchedule(PSCHED1_ID));
  Assert.assertEquals(tsched11, scheduler.getSchedule(TSCHED11_ID));
  Assert.assertEquals(psched2, scheduler.getSchedule(PSCHED2_ID));
  // list by app and by program
  Assert.assertEquals(ImmutableList.of(psched1, tsched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // delete one schedule
  scheduler.deleteSchedule(TSCHED1_ID);
  verifyNotFound(scheduler, TSCHED1_ID);
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // attempt to delete it again along with another one that exists
  try {
    scheduler.deleteSchedules(ImmutableList.of(TSCHED1_ID, TSCHED11_ID));
    Assert.fail("expected NotFoundException");
  } catch (NotFoundException e) {
    // expected
  }
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // attempt to add it back together with a schedule that already exists
  try {
    scheduler.addSchedules(ImmutableList.of(tsched1, tsched11));
    Assert.fail("expected AlreadyExistsException");
  } catch (AlreadyExistsException e) {
    // expected
  }
  Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
  // add it back, then delete all schedules for one app
  scheduler.addSchedule(tsched1);
  scheduler.deleteSchedules(APP1_ID);
  verifyNotFound(scheduler, TSCHED1_ID);
  verifyNotFound(scheduler, PSCHED1_ID);
  verifyNotFound(scheduler, TSCHED11_ID);
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG1_ID));
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG11_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
  Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(APP1_ID));
  Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
}
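verifyNotFound is a static helper of the test class that does not appear in this excerpt. A minimal sketch of what it plausibly does, given how the test uses it; the body is an assumption, not the verbatim cdap source:

// Plausible sketch; the actual helper in CoreSchedulerServiceTest may differ.
private static void verifyNotFound(Scheduler scheduler, ScheduleId scheduleId) {
  try {
    scheduler.getSchedule(scheduleId);
    Assert.fail("expected NotFoundException for " + scheduleId);
  } catch (NotFoundException e) {
    // expected: the schedule must no longer exist
  }
}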
use of co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class LastRunConstraintTest method testLastRunConstraint.
@Test
public void testLastRunConstraint() {
  Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
  long now = System.currentTimeMillis();
  long nowSec = TimeUnit.MILLISECONDS.toSeconds(now);
  ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID,
                                                 ImmutableMap.of("prop3", "abc"),
                                                 new PartitionTrigger(DATASET_ID, 1), ImmutableList.<Constraint>of());
  SimpleJob job = new SimpleJob(schedule, now, Collections.<Notification>emptyList(), Job.State.PENDING_TRIGGER, 0L);
  // require 1 hour since the last run
  LastRunConstraint lastRunConstraint = new LastRunConstraint(1, TimeUnit.HOURS);
  ConstraintContext constraintContext = new ConstraintContext(job, now, store);
  // there have been no runs, so the constraint is satisfied by default
  assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
  String pid1 = RunIds.generate().getId();
  String pid2 = RunIds.generate().getId();
  String pid3 = RunIds.generate().getId();
  String pid4 = RunIds.generate().getId();
  // a RUNNING workflow started 3 hours ago will fail the constraint check
  Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
  store.setStart(WORKFLOW_ID, pid1, nowSec - TimeUnit.HOURS.toSeconds(3), null, EMPTY_MAP, systemArgs);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  // a SUSPENDED workflow started 3 hours ago will also fail the constraint check
  store.setSuspend(WORKFLOW_ID, pid1);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  store.setResume(WORKFLOW_ID, pid1);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  // if that same workflow run completed 2 hours ago, the constraint check will be satisfied
  store.setStop(WORKFLOW_ID, pid1, nowSec - TimeUnit.HOURS.toSeconds(2), ProgramRunStatus.COMPLETED);
  assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
  // a RUNNING workflow started 2 hours ago will fail the constraint check
  store.setStart(WORKFLOW_ID, pid2, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  // if that same workflow run failed 1 minute ago, the constraint check will be satisfied,
  // because only successful runs count as the "last run"
  store.setStop(WORKFLOW_ID, pid2, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.FAILED);
  assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
  // similarly, a KILLED workflow started 2 hours ago will also fail the constraint check
  store.setStart(WORKFLOW_ID, pid3, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  store.setStop(WORKFLOW_ID, pid3, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.KILLED);
  assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
  // a RUNNING workflow started 2 hours ago will fail the constraint check
  store.setStart(WORKFLOW_ID, pid4, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
  // if that same workflow run COMPLETED 1 minute ago, the constraint check will not be satisfied
  store.setStop(WORKFLOW_ID, pid4, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.COMPLETED);
  assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
}
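assertSatisfied is a small helper defined elsewhere in LastRunConstraintTest. A plausible sketch, assuming ConstraintResult.SATISFIED is a reusable constant on the ConstraintResult class; the real helper may compare the result differently:

// Hypothetical helper; the actual implementation in LastRunConstraintTest may differ.
private void assertSatisfied(boolean expectSatisfied, ConstraintResult constraintResult) {
  if (expectSatisfied) {
    Assert.assertEquals(ConstraintResult.SATISFIED, constraintResult);
  } else {
    // any non-SATISFIED result (e.g. a retry-later result) counts as not satisfied here
    Assert.assertNotEquals(ConstraintResult.SATISFIED, constraintResult);
  }
}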
use of co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class ProgramScheduleStoreDatasetTest method testFindSchedulesByEventAndUpdateSchedule.
@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  final ProgramScheduleStoreDataset store =
    dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
  final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID,
                                                      ImmutableMap.of("prop3", "abc"),
                                                      new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID,
                                                      ImmutableMap.of("propper", "popper"),
                                                      new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID,
                                                      ImmutableMap.of("nn", "4"),
                                                      new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // an event for DS1 or DS2 should trigger nothing; validate it returns an empty collection
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
      Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // an event for DS1 should trigger only sched11
      Assert.assertEquals(ImmutableSet.of(sched11),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // an event for DS2 triggers only sched12 and sched22
      Assert.assertEquals(ImmutableSet.of(sched12, sched22),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
  final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID,
                                                         ImmutableMap.of("timeprop", "time"),
                                                         new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
  final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID,
                                                         ImmutableMap.of("pp", "p"),
                                                         new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
  final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "one streamsize schedule", PROG2_ID,
                                                         ImmutableMap.of("ss", "s"),
                                                         new StreamSizeTrigger(NS_ID.stream("stream"), 1),
                                                         ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.updateSchedule(sched11New);
      store.updateSchedule(sched12New);
      store.updateSchedule(sched22New);
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // after the update, an event for DS1 should trigger only sched12New
      Assert.assertEquals(ImmutableSet.of(sched12New),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
      // after the update, an event for DS2 triggers no schedule
      Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(),
                          toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
    }
  });
}
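toScheduleSet is another helper of the test class that is not shown here. A plausible sketch, assuming findSchedules returns a collection of ProgramScheduleRecord wrappers exposing getSchedule(); the exact signature is an assumption, not the verbatim cdap source:

// Hypothetical sketch; the real helper in ProgramScheduleStoreDatasetTest may differ.
private static Set<ProgramSchedule> toScheduleSet(Collection<ProgramScheduleRecord> records) {
  Set<ProgramSchedule> schedules = new HashSet<>();
  for (ProgramScheduleRecord record : records) {
    schedules.add(record.getSchedule()); // strip per-record metadata, keep only the schedule itself
  }
  return schedules;
}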
use of co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class ProgramScheduleStoreDataset method deleteSchedules.
/**
* Removes all schedules for a specific program from the store.
*
* @param programId the program id for which to delete the schedules
* @return the IDs of the schedules that were deleted
*/
public List<ScheduleId> deleteSchedules(ProgramId programId) {
  List<ScheduleId> deleted = new ArrayList<>();
  // since all trigger row keys are prefixed by <scheduleRowKey>@,
  // a scan for the application's key prefix finds exactly the schedule rows and all of their triggers
  byte[] prefix = keyPrefixForApplicationScan(programId.getParent());
  try (Scanner scanner = store.scan(new Scan(prefix, Bytes.stopKeyForPrefix(prefix)))) {
    Row row;
    while ((row = scanner.next()) != null) {
      byte[] serialized = row.get(SCHEDULE_COLUMN_BYTES);
      if (serialized != null) {
        ProgramSchedule schedule = GSON.fromJson(Bytes.toString(serialized), ProgramSchedule.class);
        // the scan covers the whole application, so keep only schedules of the given program
        if (programId.equals(schedule.getProgramId())) {
          store.delete(row.getRow());
          deleted.add(schedule.getScheduleId());
        }
      }
    }
  }
  return deleted;
}
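Since ProgramScheduleStoreDataset is a TransactionAware dataset, callers are expected to invoke deleteSchedules inside a transaction, for example through a TransactionExecutor as in the test above. A hedged usage sketch; PROG1_ID and the listSchedules(ProgramId) call are assumptions based on how the store is used elsewhere in these tests:

// Illustrative only; assumes a txExecutor wired to the store as in testFindSchedulesByEventAndUpdateSchedule.
txExecutor.execute(new TransactionExecutor.Subroutine() {
  @Override
  public void apply() throws Exception {
    List<ScheduleId> deleted = store.deleteSchedules(PROG1_ID);
    // the returned ids identify exactly the removed schedules;
    // schedules of other programs in the same application are untouched
    Assert.assertTrue(store.listSchedules(PROG1_ID).isEmpty());
  }
});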