use of io.cdap.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class DeleteAndCreateSchedulesStage method process.
@Override
public void process(final ApplicationWithPrograms input) throws Exception {
  if (!input.canUpdateSchedules()) {
    // if we can't update schedules, emit and return
    emit(input);
    return;
  }
  ApplicationId appId = input.getApplicationId();
  // Get a set of new schedules from the app spec
  Set<ProgramSchedule> newSchedules = getProgramScheduleSet(appId, input.getSpecification());
  for (ProgramSchedule schedule : programScheduler.listSchedules(appId)) {
    if (newSchedules.contains(schedule)) {
      // The schedule already exists unchanged; remove it from newSchedules so it is not re-added
      newSchedules.remove(schedule);
      continue;
    }
    // Delete the existing schedule since it is not present in newSchedules
    programScheduler.deleteSchedule(schedule.getScheduleId());
  }
  // Add the remaining (new or changed) schedules
  programScheduler.addSchedules(newSchedules);
  // Emit the input to the next stage
  emit(input);
}
use of io.cdap.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class ConcurrencyConstraintTest method testMaxConcurrentRuns.
@Test
public void testMaxConcurrentRuns() {
  Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
  try {
    long now = System.currentTimeMillis();
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
    SimpleJob job = new SimpleJob(schedule, 0, now, Collections.emptyList(), Job.State.PENDING_TRIGGER, 0L);
    ConcurrencyConstraint concurrencyConstraint = new ConcurrencyConstraint(2);
    ConstraintContext constraintContext = new ConstraintContext(job, now, store);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    ProgramRunId pid1 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid2 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid3 = WORKFLOW_ID.run(RunIds.generate().getId());
    // add a run for the schedule
    Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
    setStartAndRunning(store, pid1, EMPTY_MAP, systemArgs);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program from a different schedule. Since there are now 2 running instances of the
    // workflow (regardless of the schedule name), the constraint is not met
    systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, "not" + schedule.getName());
    setStartAndRunning(store, pid2, EMPTY_MAP, systemArgs);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program that wasn't started by a schedule;
    // there are now three concurrent runs, so the constraint will not be met
    setStartAndRunning(store, pid3);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // stop the first program; the constraint is still not satisfied, as there are still 2 running
    store.setStop(pid1, System.currentTimeMillis(), ProgramRunStatus.COMPLETED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // suspending/resuming the workflow doesn't reduce its concurrency count
    store.setSuspend(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    store.setResume(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // but the constraint will be satisfied when it completes, as only 1 run remains RUNNING
    store.setStop(pid3, System.currentTimeMillis(), ProgramRunStatus.KILLED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // add a run in the provisioning state; the constraint will not be satisfied since active runs increased to 2
    ProgramRunId pid4 = WORKFLOW_ID.run(RunIds.generate().getId());
    setProvisioning(store, pid4, Collections.emptyMap(), Collections.emptyMap());
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // stop the provisioning run; the constraint will be satisfied since active runs decreased to 1
    store.setStop(pid4, System.currentTimeMillis(), ProgramRunStatus.FAILED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // stopping the last running workflow also satisfies the constraint
    store.setStop(pid2, System.currentTimeMillis(), ProgramRunStatus.FAILED, AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
  } finally {
    AppFabricTestHelper.shutdown();
  }
}
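The test exercises a constraint that counts every active run of the workflow (provisioning, running, or suspended) against a fixed limit, regardless of which schedule, if any, started the run. A minimal sketch of that check, with a hypothetical RunStore and state enum standing in for CDAP's Store and run statuses:

// Hypothetical run states and store; names are illustrative, not CDAP's API.
import java.util.EnumSet;
import java.util.Map;
import java.util.Set;

enum RunState { PROVISIONING, RUNNING, SUSPENDED, COMPLETED, KILLED, FAILED }

interface RunStore {
  Map<String, RunState> getRuns(String programName); // run id -> current state
}

final class MaxConcurrencyCheck {
  // States that count toward concurrency: anything not yet terminated.
  private static final Set<RunState> ACTIVE =
      EnumSet.of(RunState.PROVISIONING, RunState.RUNNING, RunState.SUSPENDED);

  private final int maxConcurrency;

  MaxConcurrencyCheck(int maxConcurrency) {
    this.maxConcurrency = maxConcurrency;
  }

  /** Returns true if launching one more run would stay within the limit. */
  boolean isSatisfied(RunStore store, String programName) {
    long active = store.getRuns(programName).values().stream()
        .filter(ACTIVE::contains)
        .count();
    return active < maxConcurrency;
  }
}

This matches the test's arithmetic: with a limit of 2, one active run satisfies the constraint, two (whether RUNNING, SUSPENDED, or PROVISIONING) do not.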
use of io.cdap.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class JobQueueTableTest method testJobTimeout.
@Test
public void testJobTimeout() {
  TransactionRunners.run(transactionRunner, context -> {
    JobQueueTable jobQueue = JobQueueTable.getJobQueue(context, getCConf());
    // there should be 0 jobs in the JobQueue to begin with
    Assert.assertEquals(0, getAllJobs(jobQueue, false).size());
    // Construct a partition notification with DATASET_ID
    Notification notification = Notification.forPartitions(DATASET_ID, ImmutableList.of());
    Assert.assertNull(jobQueue.getJob(SCHED1_JOB.getJobKey()));
    ProgramSchedule scheduleWithTimeout = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
    Job jobWithTimeout = new SimpleJob(scheduleWithTimeout, 0, System.currentTimeMillis() - (Schedulers.JOB_QUEUE_TIMEOUT_MILLIS + 1), Lists.newArrayList(), Job.State.PENDING_TRIGGER, 0L);
    jobQueue.put(jobWithTimeout);
    Assert.assertEquals(jobWithTimeout, jobQueue.getJob(jobWithTimeout.getJobKey()));
    // before adding the notification, there should just be the job we added
    Assert.assertEquals(1, toSet(jobQueue.getJobsForSchedule(scheduleWithTimeout.getScheduleId()), true).size());
    // adding a notification will ignore the existing job (because it is timed out). It will create a new job
    // and add the notification to that new job
    jobQueue.addNotification(new ProgramScheduleRecord(SCHED1, new ProgramScheduleMeta(ProgramScheduleStatus.SCHEDULED, 0L)), notification);
    List<Job> jobs = new ArrayList<>(toSet(jobQueue.getJobsForSchedule(scheduleWithTimeout.getScheduleId()), true));
    // sort by creation time (oldest will be first in the list)
    Collections.sort(jobs, (o1, o2) -> Long.valueOf(o1.getCreationTime()).compareTo(o2.getCreationTime()));
    Assert.assertEquals(2, jobs.size());
    Job firstJob = jobs.get(0);
    // the first job should have the same creation timestamp as the initially added job
    Assert.assertEquals(jobWithTimeout.getCreationTime(), firstJob.getCreationTime());
    // the notification we added shouldn't be in the first job
    Assert.assertEquals(0, firstJob.getNotifications().size());
    // the first job should be marked to be deleted because it timed out
    Assert.assertTrue(firstJob.isToBeDeleted());
    // the first job should have the same generation id as the timed-out job
    Assert.assertEquals(jobWithTimeout.getGenerationId(), firstJob.getGenerationId());
    Job secondJob = jobs.get(1);
    // the second job is newly created, so its creation timestamp differs from the initially added job's
    Assert.assertNotEquals(jobWithTimeout.getCreationTime(), secondJob.getCreationTime());
    // the notification we added should be in the second job
    Assert.assertEquals(1, secondJob.getNotifications().size());
    Assert.assertEquals(notification, secondJob.getNotifications().get(0));
    // the second job should not be marked to be deleted, since it was just created by our call to
    // JobQueue#addNotification
    Assert.assertFalse(secondJob.isToBeDeleted());
    // the second job should have the next generation id
    Assert.assertEquals(jobWithTimeout.getGenerationId() + 1, secondJob.getGenerationId());
  });
}
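The behavior under test: when a notification arrives for a schedule whose pending job has sat in the queue longer than JOB_QUEUE_TIMEOUT_MILLIS, the queue marks the stale job for deletion and creates a fresh job, with an incremented generation id, to hold the notification. A minimal sketch of that decision, with a hypothetical, simplified job model rather than CDAP's JobQueueTable:

// Hypothetical job model for illustration; not CDAP's actual classes.
import java.util.ArrayList;
import java.util.List;

final class PendingJob {
  final long creationTime;
  final int generationId;
  final List<String> notifications = new ArrayList<>();
  boolean toBeDeleted;

  PendingJob(long creationTime, int generationId) {
    this.creationTime = creationTime;
    this.generationId = generationId;
  }
}

final class TimeoutAwareQueue {
  private final long timeoutMillis;

  TimeoutAwareQueue(long timeoutMillis) {
    this.timeoutMillis = timeoutMillis;
  }

  /**
   * Delivers a notification to the pending job, or, if that job has timed out,
   * marks it for deletion and creates a successor with the next generation id.
   * Returns the job that received the notification.
   */
  PendingJob addNotification(PendingJob pending, String notification, long now) {
    if (pending != null && now - pending.creationTime > timeoutMillis) {
      pending.toBeDeleted = true; // stale: left in place for later cleanup
      PendingJob successor = new PendingJob(now, pending.generationId + 1);
      successor.notifications.add(notification);
      return successor;
    }
    PendingJob target = (pending != null) ? pending : new PendingJob(now, 0);
    target.notifications.add(notification);
    return target;
  }
}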
use of io.cdap.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class JobQueueTableTest method testJobQueueIteration.
@Test
public void testJobQueueIteration() {
  // Test that getJobs can be leveraged to iterate over the JobQueue partially (and across transactions).
  // The Job last consumed from the Iterator returned in the first iteration can be passed to the next call
  // to getJobs, to resume scanning from where the previous iteration left off
  TransactionRunners.run(transactionRunner, context -> {
    JobQueueTable jobQueue = JobQueueTable.getJobQueue(context, getCConf());
    // there should be 0 jobs in the JobQueue to begin with
    Assert.assertEquals(0, getAllJobs(jobQueue).size());
    Multimap<Integer, Job> jobsByPartition = HashMultimap.create();
    long now = 1494353984967L;
    for (int i = 0; i < 100; i++) {
      ProgramSchedule schedule = new ProgramSchedule("sched" + i, "one partition schedule", WORKFLOW_ID, ImmutableMap.of(), new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
      Job job = new SimpleJob(schedule, i, now + i, ImmutableList.of(), Job.State.PENDING_TRIGGER, 0L);
      jobsByPartition.put(jobQueue.getPartition(schedule.getScheduleId()), job);
      jobQueue.put(job);
    }
    // in partition 0, there should be at least two Jobs for us to even test resumption of job queue iteration
    Set<Job> partitionZeroJobs = new HashSet<>(jobsByPartition.get(0));
    Assert.assertTrue(partitionZeroJobs.size() > 1);
    // sanity check that all 100 jobs are in the JobQueue
    Assert.assertEquals(jobsByPartition.size(), getAllJobs(jobQueue).size());
    Assert.assertEquals(partitionZeroJobs, toSet(jobQueue.getJobs(0, null)));
    // consume just 1 job in the first partition. Then, use it to specify the exclusive starting point in the
    // next call to getJobs
    Job firstConsumedJob;
    try (CloseableIterator<Job> partitionZeroJobsIter = jobQueue.getJobs(0, null)) {
      Assert.assertTrue(partitionZeroJobsIter.hasNext());
      firstConsumedJob = partitionZeroJobsIter.next();
    }
    // the Jobs consumed in the second iteration should be all except the Job consumed in the first iteration
    Set<Job> consumedInSecondIteration = toSet(jobQueue.getJobs(0, firstConsumedJob));
    Assert.assertEquals(partitionZeroJobs.size() - 1, consumedInSecondIteration.size());
    Set<Job> consumedJobs = new HashSet<>(consumedInSecondIteration);
    consumedJobs.add(firstConsumedJob);
    Assert.assertEquals(partitionZeroJobs, consumedJobs);
  });
}
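The pattern being tested is cursor-style pagination: each scan can start strictly after the last item consumed, so iteration can be split across transactions without revisiting work. A minimal sketch of the same idea over a sorted in-memory map, with illustrative names in place of the JobQueueTable API:

// Illustrative resumable scan over keyed rows; not CDAP's JobQueueTable.
import java.util.Iterator;
import java.util.NavigableMap;
import java.util.TreeMap;

final class ResumableScan<V> {
  private final NavigableMap<String, V> rows = new TreeMap<>();

  void put(String key, V value) {
    rows.put(key, value);
  }

  /**
   * Returns an iterator over all values, or, if lastSeenKey is non-null,
   * only values whose key sorts strictly after it (an exclusive resume point).
   */
  Iterator<V> scan(String lastSeenKey) {
    NavigableMap<String, V> view =
        (lastSeenKey == null) ? rows : rows.tailMap(lastSeenKey, /* inclusive */ false);
    return view.values().iterator();
  }
}

A caller keeps the key of the last row it processed; after a transaction boundary it calls scan(lastKey) and continues, exactly as the test resumes with jobQueue.getJobs(0, firstConsumedJob).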
use of io.cdap.cdap.internal.app.runtime.schedule.ProgramSchedule in project cdap by caskdata.
the class MetadataSubscriberServiceTest method testProfileMetadata.
@Test
public void testProfileMetadata() throws Exception {
  Injector injector = getInjector();
  ApplicationSpecification appSpec = Specifications.from(new AppWithWorkflow());
  ApplicationId appId = NamespaceId.DEFAULT.app(appSpec.getName());
  ProgramId workflowId = appId.workflow("SampleWorkflow");
  ScheduleId scheduleId = appId.schedule("tsched1");
  // publish a creation event for a schedule that will never exist;
  // this tests that such a message is eventually discarded.
  // note that for this test, we configure a fast retry strategy and a small number of retries,
  // so this will cost only a few seconds of delay
  publishBogusCreationEvent();
  // the mds should have no properties yet since we haven't started the MetadataSubscriberService
  MetadataStorage mds = injector.getInstance(MetadataStorage.class);
  Assert.assertEquals(Collections.emptyMap(), mds.read(new Read(workflowId.toMetadataEntity())).getProperties());
  Assert.assertEquals(Collections.emptyMap(), mds.read(new Read(scheduleId.toMetadataEntity())).getProperties());
  // add an app with a workflow to the app meta store.
  // note: since we bypass the app-fabric when adding this app, no ENTITY_CREATION message
  // will be published for the app (that happens in the app lifecycle service). Therefore this
  // app must exist before assigning the profile for the namespace, otherwise the app's
  // programs will not receive the profile metadata.
  Store store = injector.getInstance(DefaultStore.class);
  store.addApplication(appId, appSpec);
  // set the default namespace to use the profile; since the MetadataSubscriberService is not started yet,
  // this should not affect the mds
  PreferencesService preferencesService = injector.getInstance(PreferencesService.class);
  preferencesService.setProperties(NamespaceId.DEFAULT, Collections.singletonMap(SystemArguments.PROFILE_NAME, ProfileId.NATIVE.getScopedName()));
  // add a schedule to the schedule store
  ProgramScheduleService scheduleService = injector.getInstance(ProgramScheduleService.class);
  scheduleService.add(new ProgramSchedule("tsched1", "one time schedule", workflowId, Collections.emptyMap(), new TimeTrigger("* * ? * 1"), ImmutableList.of()));
  // add a new profile in the default namespace
  ProfileService profileService = injector.getInstance(ProfileService.class);
  ProfileId myProfile = new ProfileId(NamespaceId.DEFAULT.getNamespace(), "MyProfile");
  Profile profile1 = new Profile("MyProfile", Profile.NATIVE.getLabel(), Profile.NATIVE.getDescription(), Profile.NATIVE.getScope(), Profile.NATIVE.getProvisioner());
  profileService.saveProfile(myProfile, profile1);
  // add a second profile in the default namespace
  ProfileId myProfile2 = new ProfileId(NamespaceId.DEFAULT.getNamespace(), "MyProfile2");
  Profile profile2 = new Profile("MyProfile2", Profile.NATIVE.getLabel(), Profile.NATIVE.getDescription(), Profile.NATIVE.getScope(), Profile.NATIVE.getProvisioner());
  profileService.saveProfile(myProfile2, profile2);
  try {
    // Verify the workflow profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // set the default namespace to use MyProfile
    preferencesService.setProperties(NamespaceId.DEFAULT, Collections.singletonMap(SystemArguments.PROFILE_NAME, "USER:MyProfile"));
    // Verify the workflow profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // set the app level to use MyProfile2
    preferencesService.setProperties(appId, Collections.singletonMap(SystemArguments.PROFILE_NAME, "USER:MyProfile2"));
    // set the instance level to the system profile
    preferencesService.setProperties(Collections.singletonMap(SystemArguments.PROFILE_NAME, ProfileId.NATIVE.getScopedName()));
    // Verify the workflow profile metadata is updated to MyProfile2, which is set at the app level
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile2, which is set at the app level
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // removing the preferences at the instance level should not affect the metadata
    preferencesService.deleteProperties();
    // Verify the workflow profile metadata remains MyProfile2, since the app-level preference still applies
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata remains MyProfile2, since the app-level preference still applies
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // removing the app-level preference lets the programs/schedules fall back to the namespace-level preference
    preferencesService.deleteProperties(appId);
    // Verify the workflow profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // remove the namespace-level preference so that no preference remains
    preferencesService.deleteProperties(NamespaceId.DEFAULT);
    // Verify the workflow profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, workflowId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, scheduleId), 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    // stop and clean up the store
    preferencesService.deleteProperties(NamespaceId.DEFAULT);
    preferencesService.deleteProperties();
    preferencesService.deleteProperties(appId);
    store.removeAll(NamespaceId.DEFAULT);
    scheduleService.delete(scheduleId);
    profileService.disableProfile(myProfile);
    profileService.disableProfile(myProfile2);
    profileService.deleteAllProfiles(myProfile.getNamespaceId());
    mds.apply(new MetadataMutation.Drop(workflowId.toMetadataEntity()), MutationOptions.DEFAULT);
    mds.apply(new MetadataMutation.Drop(scheduleId.toMetadataEntity()), MutationOptions.DEFAULT);
  }
}
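The test walks the preference resolution order for the profile property: an app-level setting wins over the namespace level, which wins over the instance level, and the native profile is the fallback when nothing is set. A minimal sketch of that lookup chain, with hypothetical names in place of CDAP's preferences API:

// Illustrative preference resolution; names are hypothetical, not CDAP's API.
import java.util.Map;
import java.util.Optional;

final class ProfileResolver {
  private static final String PROFILE_KEY = "system.profile.name";
  private static final String NATIVE_PROFILE = "SYSTEM:native";

  /**
   * Resolves the effective profile for a program: the most specific scope
   * that sets the property wins; otherwise fall back to the native profile.
   */
  static String resolve(Map<String, String> appPrefs,
                        Map<String, String> namespacePrefs,
                        Map<String, String> instancePrefs) {
    return Optional.ofNullable(appPrefs.get(PROFILE_KEY))
        .or(() -> Optional.ofNullable(namespacePrefs.get(PROFILE_KEY)))
        .or(() -> Optional.ofNullable(instancePrefs.get(PROFILE_KEY)))
        .orElse(NATIVE_PROFILE);
  }
}

Under this model, deleting the instance-level entry while an app-level entry exists changes nothing (the app scope still wins), and deleting every entry yields the native default, which is exactly the sequence of states the test asserts.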