Use of co.cask.cdap.internal.app.runtime.schedule.queue.Job in project cdap by caskdata.
The class CoreSchedulerService, method cleanupJobs.
// Attempts to remove all jobs that are in PENDING_LAUNCH state. These are jobs that were about to
// be launched, but the scheduler shut down or crashed after the job was marked PENDING_LAUNCH and
// before it was actually launched. This should only be called at startup.
private void cleanupJobs() {
  try {
    transactional.execute(context -> {
      JobQueueDataset jobQueue = Schedulers.getJobQueue(context, datasetFramework);
      try (CloseableIterator<Job> jobIter = jobQueue.fullScan()) {
        LOG.info("Cleaning up jobs in state {}.", Job.State.PENDING_LAUNCH);
        while (jobIter.hasNext()) {
          Job job = jobIter.next();
          if (job.getState() == Job.State.PENDING_LAUNCH) {
            LOG.warn("Removing job because it was left in state {} from a previous run of the scheduler: {}.",
                     Job.State.PENDING_LAUNCH, job);
            jobQueue.deleteJob(job);
          }
        }
      }
    });
  } catch (TransactionFailureException exception) {
    LOG.warn("Failed to clean up jobs upon startup.", exception);
  }
}
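A note on where cleanupJobs() fits: since it must run once before the scheduler starts launching jobs, the natural call site is the service's startup hook. The startUp() override below is a minimal sketch of that wiring, assuming CoreSchedulerService extends a Guava-style AbstractIdleService; it is an illustration, not the actual CDAP source.

@Override
protected void startUp() throws Exception {
  // Hypothetical startup sequence: purge jobs stranded in PENDING_LAUNCH by a
  // previous crash before any new jobs can be launched, so nothing runs twice.
  cleanupJobs();
  // ... proceed with the rest of the scheduler startup ...
}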
Use of co.cask.cdap.internal.app.runtime.schedule.queue.Job in project cdap by caskdata.
The class CoreSchedulerServiceTest, method testRunScheduledJobs.
@Test
@Category(XSlowTests.class)
public void testRunScheduledJobs() throws Exception {
  CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
  dataEventTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC));
  // Deploy the app with a version
  Id.Artifact appArtifactId = Id.Artifact.from(Id.Namespace.DEFAULT, "appwithschedules", VERSION1);
  addAppArtifact(appArtifactId, AppWithFrequentScheduledWorkflows.class);
  AppRequest<? extends Config> appRequest =
    new AppRequest<>(new ArtifactSummary(appArtifactId.getName(), appArtifactId.getVersion().getVersion()));
  deploy(APP_ID, appRequest);
  // Resume the schedules because schedules are initialized as paused
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_2);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  for (int i = 0; i < 5; i++) {
    testNewPartition(i + 1);
  }
  // Enable COMPOSITE_SCHEDULE before publishing events to DATASET_NAME2
  enableSchedule(AppWithFrequentScheduledWorkflows.COMPOSITE_SCHEDULE);
  // Disable the two partition schedules and send them notifications (they should not trigger)
  int runs1 = getRuns(WORKFLOW_1, ProgramRunStatus.ALL);
  int runs2 = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME1);
  long minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Both workflows must run at least once.
  // If the testNewPartition() loop took longer than expected, there may be more runs (Quartz fired multiple times).
  Tasks.waitFor(true, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.COMPLETED) > 0
        && getRuns(SCHEDULED_WORKFLOW_2, ProgramRunStatus.COMPLETED) > 0;
    }
  }, 10, TimeUnit.SECONDS);
  // There shouldn't be any partition trigger in the job queue
  Assert.assertFalse(Iterables.any(getAllJobs(), new Predicate<Job>() {
    @Override
    public boolean apply(Job job) {
      return job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger;
    }
  }));
  ProgramId compositeWorkflow = APP_ID.workflow(AppWithFrequentScheduledWorkflows.COMPOSITE_WORKFLOW);
  // The workflow scheduled with the composite trigger has never been started
  Assert.assertEquals(0, getRuns(compositeWorkflow, ProgramRunStatus.ALL));
  // Publish two more new partition notifications to satisfy the partition trigger in the composite trigger,
  // and thus the whole composite trigger will be satisfied
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Wait for 1 run to complete for compositeWorkflow
  waitForCompleteRuns(1, compositeWorkflow);
  for (RunRecordMeta runRecordMeta :
       store.getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.ALL, 0, Long.MAX_VALUE, Integer.MAX_VALUE).values()) {
    Map<String, String> sysArgs = runRecordMeta.getSystemArgs();
    Assert.assertNotNull(sysArgs);
    TriggeringScheduleInfo scheduleInfo =
      GSON.fromJson(sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO), TriggeringScheduleInfo.class);
    Assert.assertEquals(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1, scheduleInfo.getName());
    List<TriggerInfo> triggerInfos = scheduleInfo.getTriggerInfos();
    // A single trigger info is enough to satisfy the time trigger
    Assert.assertEquals(1, triggerInfos.size());
    Assert.assertEquals(TriggerInfo.Type.TIME, triggerInfos.get(0).getType());
  }
  // Also verify that the two partition schedules did not trigger
  Assert.assertEquals(runs1, getRuns(WORKFLOW_1, ProgramRunStatus.ALL));
  Assert.assertEquals(runs2, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));
  // Re-enable partition schedule 2
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  testScheduleUpdate("disable");
  testScheduleUpdate("update");
  testScheduleUpdate("delete");
}
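The test leans on helpers such as waitForCompleteRuns(...) that are not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming the same getRuns(...) accessor and Tasks utility used above; it illustrates the polling pattern, not the project's actual helper.

private void waitForCompleteRuns(final int numRuns, final ProgramId program) throws Exception {
  // Poll until the program has recorded at least the expected number of COMPLETED runs,
  // failing the test if that does not happen within the timeout.
  Tasks.waitFor(true, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return getRuns(program, ProgramRunStatus.COMPLETED) >= numRuns;
    }
  }, 10, TimeUnit.SECONDS);
}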
Use of co.cask.cdap.internal.app.runtime.schedule.queue.Job in project cdap by caskdata.
The class CoreSchedulerServiceTest, method testScheduleUpdate.
private void testScheduleUpdate(String howToUpdate) throws Exception {
  int runs = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
  final ScheduleId scheduleId2 = APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  // Send one notification to it
  long minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // A pending job will be created, but it won't run
  Assert.assertTrue("Expected a PENDING_TRIGGER job for " + scheduleId2, Iterables.any(getAllJobs(), new Predicate<Job>() {
    @Override
    public boolean apply(Job job) {
      if (!(job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger)) {
        return false;
      }
      return scheduleId2.equals(job.getJobKey().getScheduleId()) && job.getState() == Job.State.PENDING_TRIGGER;
    }
  }));
  Assert.assertEquals(runs, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));
  if ("disable".equals(howToUpdate)) {
    // Disabling and enabling the schedule should remove the job
    disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
    enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  } else {
    ProgramSchedule schedule = scheduler.getSchedule(scheduleId2);
    Map<String, String> updatedProperties =
      ImmutableMap.<String, String>builder().putAll(schedule.getProperties()).put(howToUpdate, howToUpdate).build();
    ProgramSchedule updatedSchedule = new ProgramSchedule(schedule.getName(), schedule.getDescription(),
                                                          schedule.getProgramId(), updatedProperties,
                                                          schedule.getTrigger(), schedule.getConstraints());
    if ("update".equals(howToUpdate)) {
      scheduler.updateSchedule(updatedSchedule);
      Assert.assertEquals(ProgramScheduleStatus.SCHEDULED, scheduler.getScheduleStatus(scheduleId2));
    } else if ("delete".equals(howToUpdate)) {
      scheduler.deleteSchedule(scheduleId2);
      scheduler.addSchedule(updatedSchedule);
      enableSchedule(scheduleId2.getSchedule());
    } else {
      Assert.fail("invalid howToUpdate: " + howToUpdate);
    }
  }
  // A single notification should not trigger workflow 2 yet (if it does, the pending job was not removed)
  minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Again, a pending job will be created, but it won't run, since updating the schedule removed the pending trigger
  Assert.assertTrue("Expected a PENDING_TRIGGER job for " + scheduleId2, Iterables.any(getAllJobs(), new Predicate<Job>() {
    @Override
    public boolean apply(Job job) {
      if (!(job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger)) {
        return false;
      }
      return scheduleId2.equals(job.getJobKey().getScheduleId()) && job.getState() == Job.State.PENDING_TRIGGER;
    }
  }));
  Assert.assertEquals(runs, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));
  // Publish one more notification; this should kick off the workflow
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  waitForCompleteRuns(runs + 1, WORKFLOW_2);
}
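The PENDING_TRIGGER check above is duplicated verbatim. A small factory method that captures it is shown below as a refactoring sketch, not code from the project; since Guava's Predicate has a single abstract method, a Java 8 lambda can implement it directly.

private Predicate<Job> isPendingTriggerFor(final ScheduleId scheduleId) {
  // Matches a partition-triggered job for the given schedule that is still waiting to fire.
  return job -> job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger
    && scheduleId.equals(job.getJobKey().getScheduleId())
    && job.getState() == Job.State.PENDING_TRIGGER;
}

With this helper, each assertion collapses to:
Assert.assertTrue("Expected a PENDING_TRIGGER job for " + scheduleId2,
                  Iterables.any(getAllJobs(), isPendingTriggerFor(scheduleId2)));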