Search in sources:

Example 1 with PartitionTrigger

Use of co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.

From the class ConcurrencyConstraintTest, method testMaxConcurrentRuns.

@Test
public void testMaxConcurrentRuns() {
    Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
    long now = System.currentTimeMillis();
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.<Constraint>of());
    SimpleJob job = new SimpleJob(schedule, now, Collections.<Notification>emptyList(), Job.State.PENDING_TRIGGER, 0L);
    ConcurrencyConstraint concurrencyConstraint = new ConcurrencyConstraint(2);
    ConstraintContext constraintContext = new ConstraintContext(job, now, store);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    String pid1 = RunIds.generate().getId();
    String pid2 = RunIds.generate().getId();
    String pid3 = RunIds.generate().getId();
    // add a run for the schedule
    Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
    store.setStart(WORKFLOW_ID, pid1, System.currentTimeMillis(), null, EMPTY_MAP, systemArgs);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program from a different schedule. Since there are now 2 running instances of the
    // workflow (regardless of the schedule name), the constraint is not met
    systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, "not" + schedule.getName());
    store.setStart(WORKFLOW_ID, pid2, System.currentTimeMillis(), null, EMPTY_MAP, systemArgs);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // add a run for the program that wasn't from a schedule
    // there are now three concurrent runs, so the constraint will not be met
    store.setStart(WORKFLOW_ID, pid3, System.currentTimeMillis(), null, EMPTY_MAP, EMPTY_MAP);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // stop the first program; constraint will not be satisfied as there are still 2 running
    store.setStop(WORKFLOW_ID, pid1, System.currentTimeMillis(), ProgramRunStatus.COMPLETED);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // suspending/resuming the workflow doesn't reduce its concurrency count
    store.setSuspend(WORKFLOW_ID, pid3);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    store.setResume(WORKFLOW_ID, pid3);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    // but the constraint will be satisfied when it completes, as only 1 run then remains RUNNING
    store.setStop(WORKFLOW_ID, pid3, System.currentTimeMillis(), ProgramRunStatus.KILLED);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
    // stopping the last running workflow will also satisfy the constraint
    store.setStop(WORKFLOW_ID, pid2, System.currentTimeMillis(), ProgramRunStatus.FAILED);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
}
Also used : ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) Store(co.cask.cdap.app.store.Store) SimpleJob(co.cask.cdap.internal.app.runtime.schedule.queue.SimpleJob) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) Test(org.junit.Test)
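
The assertSatisfied helper called throughout this test (and again in Example 4) is not shown on this page; a minimal sketch of what it might look like, assuming it only compares the result's SatisfiedState against that of the ConstraintResult.SATISFIED constant:

// Hypothetical sketch of the assertSatisfied helper (not shown on this page).
private static void assertSatisfied(boolean expectSatisfied, ConstraintResult constraintResult) {
    if (expectSatisfied) {
        Assert.assertEquals(ConstraintResult.SATISFIED.getSatisfiedState(), constraintResult.getSatisfiedState());
    } else {
        // anything other than SATISFIED counts as not satisfied here
        Assert.assertNotEquals(ConstraintResult.SATISFIED.getSatisfiedState(), constraintResult.getSatisfiedState());
    }
}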

Example 2 with PartitionTrigger

Use of co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.

From the class DelayConstraintTest, method testDelayConstraint.

@Test
public void testDelayConstraint() {
    long now = System.currentTimeMillis();
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.<Constraint>of());
    SimpleJob job = new SimpleJob(schedule, now, Collections.<Notification>emptyList(), Job.State.PENDING_TRIGGER, 0L);
    // test with 10 minute delay
    DelayConstraint tenMinuteDelayConstraint = new DelayConstraint(10, TimeUnit.MINUTES);
    // a check against 12 minutes after 'now' will return SATISFIED
    ConstraintContext constraintContext = new ConstraintContext(job, now + TimeUnit.MINUTES.toMillis(12), null);
    ConstraintResult result = tenMinuteDelayConstraint.check(schedule, constraintContext);
    Assert.assertEquals(ConstraintResult.SATISFIED, result);
    // a check against 9 minutes after 'now' will return NOT_SATISFIED, with 1 minute to wait until next retry
    constraintContext = new ConstraintContext(job, now + TimeUnit.MINUTES.toMillis(9), null);
    result = tenMinuteDelayConstraint.check(schedule, constraintContext);
    Assert.assertEquals(ConstraintResult.SatisfiedState.NOT_SATISFIED, result.getSatisfiedState());
    Assert.assertEquals(constraintContext.getCheckTimeMillis() + TimeUnit.MINUTES.toMillis(1), (long) result.getNextCheckTime());
}
Also used : ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) SimpleJob(co.cask.cdap.internal.app.runtime.schedule.queue.SimpleJob) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) Test(org.junit.Test)
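
The test builds the ProgramSchedule with an empty constraint list and invokes the DelayConstraint check directly. As a hypothetical usage sketch (assuming DelayConstraint can be placed in that Constraint list), the same ten-minute delay could instead be attached to the schedule itself:

// Hypothetical: the schedule from the test, but with the delay constraint attached,
// so a triggered job waits at least ten minutes after creation before launching.
ProgramSchedule delayedSchedule = new ProgramSchedule("SCHED2", "one partition schedule with delay",
    WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1),
    ImmutableList.<Constraint>of(new DelayConstraint(10, TimeUnit.MINUTES)));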

Example 3 with PartitionTrigger

Use of co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.

From the class CoreSchedulerServiceTest, method addListDeleteSchedules.

@Test
public void addListDeleteSchedules() throws Exception {
    // verify that list returns nothing
    Assert.assertTrue(scheduler.listSchedules(APP1_ID).isEmpty());
    Assert.assertTrue(scheduler.listSchedules(PROG1_ID).isEmpty());
    // add a schedule for app1
    ProgramSchedule tsched1 = new ProgramSchedule("tsched1", "one time schedule", PROG1_ID, ImmutableMap.of("prop1", "nn"), new TimeTrigger("* * ? * 1"), ImmutableList.<Constraint>of());
    scheduler.addSchedule(tsched1);
    Assert.assertEquals(tsched1, scheduler.getSchedule(TSCHED1_ID));
    Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(tsched1), scheduler.listSchedules(PROG1_ID));
    // add three more schedules, one for the same program, one for the same app, one for another app
    ProgramSchedule psched1 = new ProgramSchedule("psched1", "one partition schedule", PROG1_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
    ProgramSchedule tsched11 = new ProgramSchedule("tsched11", "two times schedule", PROG11_ID, ImmutableMap.of("prop2", "xx"), new TimeTrigger("* * ? * 1,2"), ImmutableList.<Constraint>of());
    ProgramSchedule psched2 = new ProgramSchedule("psched2", "two partition schedule", PROG2_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
    scheduler.addSchedules(ImmutableList.of(psched1, tsched11, psched2));
    Assert.assertEquals(psched1, scheduler.getSchedule(PSCHED1_ID));
    Assert.assertEquals(tsched11, scheduler.getSchedule(TSCHED11_ID));
    Assert.assertEquals(psched2, scheduler.getSchedule(PSCHED2_ID));
    // list by app and program
    Assert.assertEquals(ImmutableList.of(psched1, tsched1), scheduler.listSchedules(PROG1_ID));
    Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
    Assert.assertEquals(ImmutableList.of(psched1, tsched1, tsched11), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
    // delete one schedule
    scheduler.deleteSchedule(TSCHED1_ID);
    verifyNotFound(scheduler, TSCHED1_ID);
    Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
    Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
    Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
    // attempt to delete it again along with another one that exists
    try {
        scheduler.deleteSchedules(ImmutableList.of(TSCHED1_ID, TSCHED11_ID));
        Assert.fail("expected NotFoundException");
    } catch (NotFoundException e) {
    // expected
    }
    Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
    Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
    Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
    // attempt to add it back together with a schedule that exists
    try {
        scheduler.addSchedules(ImmutableList.of(tsched1, tsched11));
        Assert.fail("expected AlreadyExistsException");
    } catch (AlreadyExistsException e) {
    // expected
    }
    Assert.assertEquals(ImmutableList.of(psched1), scheduler.listSchedules(PROG1_ID));
    Assert.assertEquals(ImmutableList.of(tsched11), scheduler.listSchedules(PROG11_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
    Assert.assertEquals(ImmutableList.of(psched1, tsched11), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
    // add it back, delete all schedules for one app
    scheduler.addSchedule(tsched1);
    scheduler.deleteSchedules(APP1_ID);
    verifyNotFound(scheduler, TSCHED1_ID);
    verifyNotFound(scheduler, PSCHED1_ID);
    verifyNotFound(scheduler, TSCHED11_ID);
    Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG1_ID));
    Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(PROG11_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(PROG2_ID));
    Assert.assertEquals(ImmutableList.of(), scheduler.listSchedules(APP1_ID));
    Assert.assertEquals(ImmutableList.of(psched2), scheduler.listSchedules(APP2_ID));
}
Also used : TimeTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.TimeTrigger) AlreadyExistsException(co.cask.cdap.common.AlreadyExistsException) ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) TopicNotFoundException(co.cask.cdap.api.messaging.TopicNotFoundException) NotFoundException(co.cask.cdap.common.NotFoundException) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) Test(org.junit.Test)
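
The verifyNotFound helper used above is also not shown on this page; a plausible sketch, where the Scheduler and ScheduleId parameter types are assumptions:

// Hypothetical sketch of the verifyNotFound helper: a deleted schedule id should no longer
// be resolvable, so getSchedule is expected to throw NotFoundException.
private static void verifyNotFound(Scheduler scheduler, ScheduleId scheduleId) throws Exception {
    try {
        scheduler.getSchedule(scheduleId);
        Assert.fail("expected NotFoundException for " + scheduleId);
    } catch (NotFoundException e) {
        // expected
    }
}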

Example 4 with PartitionTrigger

Use of co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.

From the class LastRunConstraintTest, method testLastRunConstraint.

@Test
public void testLastRunConstraint() {
    Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
    long now = System.currentTimeMillis();
    long nowSec = TimeUnit.MILLISECONDS.toSeconds(now);
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DATASET_ID, 1), ImmutableList.<Constraint>of());
    SimpleJob job = new SimpleJob(schedule, now, Collections.<Notification>emptyList(), Job.State.PENDING_TRIGGER, 0L);
    // require 1 hour since last run
    LastRunConstraint lastRunConstraint = new LastRunConstraint(1, TimeUnit.HOURS);
    ConstraintContext constraintContext = new ConstraintContext(job, now, store);
    // there have been no runs, so the constraint is satisfied by default
    assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
    String pid1 = RunIds.generate().getId();
    String pid2 = RunIds.generate().getId();
    String pid3 = RunIds.generate().getId();
    String pid4 = RunIds.generate().getId();
    // a RUNNING workflow started 3 hours ago will fail the constraint check
    Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
    store.setStart(WORKFLOW_ID, pid1, nowSec - TimeUnit.HOURS.toSeconds(3), null, EMPTY_MAP, systemArgs);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    // a SUSPENDED workflow started 3 hours ago will also fail the constraint check
    store.setSuspend(WORKFLOW_ID, pid1);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    store.setResume(WORKFLOW_ID, pid1);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    // if that same workflow run completed 2 hours ago, the constraint check will be satisfied
    store.setStop(WORKFLOW_ID, pid1, nowSec - TimeUnit.HOURS.toSeconds(2), ProgramRunStatus.COMPLETED);
    assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
    // a RUNNING workflow started 2 hours ago will fail the constraint check
    store.setStart(WORKFLOW_ID, pid2, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    // if that same workflow run failed 1 minute ago, the constraint check will be satisfied
    store.setStop(WORKFLOW_ID, pid2, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.FAILED);
    assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
    // similarly, a workflow started 2 hours ago and later KILLED will also fail the constraint check
    store.setStart(WORKFLOW_ID, pid3, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    store.setStop(WORKFLOW_ID, pid3, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.KILLED);
    assertSatisfied(true, lastRunConstraint.check(schedule, constraintContext));
    // a RUNNING workflow started 2 hours ago will fail the constraint check
    store.setStart(WORKFLOW_ID, pid4, nowSec - TimeUnit.HOURS.toSeconds(2), null, EMPTY_MAP, EMPTY_MAP);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
    // if that same workflow run completed 1 minute ago, the constraint check will not be satisfied
    store.setStop(WORKFLOW_ID, pid4, nowSec - TimeUnit.MINUTES.toSeconds(1), ProgramRunStatus.COMPLETED);
    assertSatisfied(false, lastRunConstraint.check(schedule, constraintContext));
}
Also used : ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) Store(co.cask.cdap.app.store.Store) SimpleJob(co.cask.cdap.internal.app.runtime.schedule.queue.SimpleJob) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) Test(org.junit.Test)
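
Both Store-backed tests (Examples 1 and 4) reference WORKFLOW_ID, DATASET_ID and EMPTY_MAP without defining them on this page; illustrative definitions, where the namespace, application, workflow and dataset names are assumptions:

// Hypothetical test fixtures; the real values are defined in the test classes themselves.
private static final Map<String, String> EMPTY_MAP = ImmutableMap.of();
private static final WorkflowId WORKFLOW_ID = new WorkflowId("default", "testApp", "testWorkflow");
private static final DatasetId DATASET_ID = new DatasetId("default", "testDataset");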

Example 5 with PartitionTrigger

Use of co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger in project cdap by caskdata.

From the class ProgramScheduleStoreDatasetTest, method testFindSchedulesByEventAndUpdateSchedule.

@Test
public void testFindSchedulesByEventAndUpdateSchedule() throws Exception {
    DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
    TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
    TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
    final ProgramScheduleStoreDataset store = dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
    Assert.assertNotNull(store);
    TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
    final ProgramSchedule sched11 = new ProgramSchedule("sched11", "one partition schedule", PROG1_ID, ImmutableMap.of("prop3", "abc"), new PartitionTrigger(DS1_ID, 1), ImmutableList.<Constraint>of());
    final ProgramSchedule sched12 = new ProgramSchedule("sched12", "two partition schedule", PROG1_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS2_ID, 2), ImmutableList.<Constraint>of());
    final ProgramSchedule sched22 = new ProgramSchedule("sched22", "twentytwo partition schedule", PROG2_ID, ImmutableMap.of("nn", "4"), new PartitionTrigger(DS2_ID, 22), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // an event for DS1 or DS2 should trigger nothing; validate that findSchedules returns an empty collection
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID)).isEmpty());
            Assert.assertTrue(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID)).isEmpty());
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.addSchedules(ImmutableList.of(sched11, sched12, sched22));
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // event for DS1 should trigger only sched11
            Assert.assertEquals(ImmutableSet.of(sched11), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
            // event for DS2 triggers only sched12 and sched22
            Assert.assertEquals(ImmutableSet.of(sched12, sched22), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
        }
    });
    final ProgramSchedule sched11New = new ProgramSchedule(sched11.getName(), "time schedule", PROG1_ID, ImmutableMap.of("timeprop", "time"), new TimeTrigger("* * * * *"), ImmutableList.<Constraint>of());
    final ProgramSchedule sched12New = new ProgramSchedule(sched12.getName(), "one partition schedule", PROG1_ID, ImmutableMap.of("pp", "p"), new PartitionTrigger(DS1_ID, 2), ImmutableList.<Constraint>of());
    final ProgramSchedule sched22New = new ProgramSchedule(sched22.getName(), "one streamsize schedule", PROG2_ID, ImmutableMap.of("ss", "s"), new StreamSizeTrigger(NS_ID.stream("stream"), 1), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.updateSchedule(sched11New);
            store.updateSchedule(sched12New);
            store.updateSchedule(sched22New);
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // event for DS1 should trigger only sched12New after update
            Assert.assertEquals(ImmutableSet.of(sched12New), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
            // event for DS2 triggers no schedule after update
            Assert.assertEquals(ImmutableSet.<ProgramSchedule>of(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS2_ID))));
        }
    });
}
Also used : TimeTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.TimeTrigger) DynamicTransactionExecutorFactory(co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory) TransactionExecutor(org.apache.tephra.TransactionExecutor) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) TransactionExecutorFactory(co.cask.cdap.data2.transaction.TransactionExecutorFactory) DynamicTransactionExecutorFactory(co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) TransactionAware(org.apache.tephra.TransactionAware) StreamSizeTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.StreamSizeTrigger) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) Test(org.junit.Test)
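
The repeated anonymous TransactionExecutor.Subroutine blocks that only call findSchedules could be factored into a small helper. A hypothetical sketch, assuming the test's toScheduleSet method returns a Set<ProgramSchedule> and that DS1_ID/DS2_ID are DatasetId instances:

// Hypothetical convenience wrapper: look up, inside a single short transaction, the schedules
// that a dataset's partition events would trigger.
private Set<ProgramSchedule> findForPartition(TransactionExecutor txExecutor,
                                              final ProgramScheduleStoreDataset store,
                                              final DatasetId datasetId)
    throws TransactionFailureException, InterruptedException {
    final AtomicReference<Set<ProgramSchedule>> result = new AtomicReference<>();
    txExecutor.execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws Exception {
            result.set(toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(datasetId))));
        }
    });
    return result.get();
}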

Aggregations

PartitionTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger): 6
ProgramSchedule (co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule): 5
Test (org.junit.Test): 5
SimpleJob (co.cask.cdap.internal.app.runtime.schedule.queue.SimpleJob): 3
TimeTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.TimeTrigger): 3
Store (co.cask.cdap.app.store.Store): 2
StreamSizeTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.StreamSizeTrigger): 2
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 1
TopicNotFoundException (co.cask.cdap.api.messaging.TopicNotFoundException): 1
AlreadyExistsException (co.cask.cdap.common.AlreadyExistsException): 1
NotFoundException (co.cask.cdap.common.NotFoundException): 1
DynamicTransactionExecutorFactory (co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory): 1
DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework): 1
TransactionExecutorFactory (co.cask.cdap.data2.transaction.TransactionExecutorFactory): 1
Constraint (co.cask.cdap.internal.schedule.constraint.Constraint): 1
Notification (co.cask.cdap.proto.Notification): 1
TransactionAware (org.apache.tephra.TransactionAware): 1
TransactionExecutor (org.apache.tephra.TransactionExecutor): 1
TransactionSystemClient (org.apache.tephra.TransactionSystemClient): 1